diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000000..f91e5c1fe5 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + target: 40% + threshold: null + patch: false + changes: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index b0e8f57679..312ffddf43 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,6 @@ **What this PR does / why we need it**: diff --git a/.gitignore b/.gitignore index a4136efea4..6b187ddf7d 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,20 @@ Session.vim /gover.coverprofile e2e-tests + +coverage.txt +test/e2e/e2e\.test + +# mkdocs +site + +# temporal github pages +gh-pages + +# Docker-based builds +/test/binaries +/.env +/.gocache/ +/bin/ + +test/e2e-image/wait-for-nginx\.sh diff --git a/.luacheckrc b/.luacheckrc new file mode 100644 index 0000000000..e7fb65c82b --- /dev/null +++ b/.luacheckrc @@ -0,0 +1,10 @@ +std = 'ngx_lua' +globals = { + '_TEST' +} +exclude_files = {'./rootfs/etc/nginx/lua/test/**/*.lua'} +files["rootfs/etc/nginx/lua/lua_ingress.lua"] = { + ignore = { "122" }, + -- TODO(elvinefendi) figure out why this does not work + --read_globals = {"math.randomseed"}, +} diff --git a/.travis.yml b/.travis.yml index bf40ab59ce..e79b076e07 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,70 +5,25 @@ sudo: required services: - docker -language: go +language: generic notifications: email: on_failure: always on_success: never -go: - - 1.9.2 - -go_import_path: k8s.io/ingress-nginx - -# New secure variables can be added using travis encrypt -r kubernetes/ingress-nginx --add K=V +# New secure variables can be added using travis encrypt -r kubernetes/ingress-nginx --add K=V env: global: - - CHANGE_MINIKUBE_NONE_USER=true - - KUBERNETES_VERSION=v1.8.0 - - DOCKER=docker - - secure: LIS2XpZufWTcJ53jiRsSZy2Gi1EUJ1XmLg7z3f2ZHeMnyG2Jhk3GW4vod1FNru+PY4PWgddLdCdIl+jqOYXndFlbdAWF3/Oy5fEkYLXdYV7tdlHcPWDkqNFrfiyZ4guChN+b2Nk6FqU7o5fsZAIR7VAbgqNRF5XMo9Mhn/vhDCQRcnbXy7uq7JTrYUkqDbQoyYvT6b480GCY5gags1zp/xZfPDNZEe936o8i5IPTyiykRyNOXN/AH6kd3pR5e1xYgcvJ9KpSVPghcwFE7kJ4fOVMRhRG5ML+IyML+xD0jX43EMNoqRKZ/HS42kIMCInFbJEcxVde7DPNBZ7Y3GAqh7HO6qrE70Dn3ha6DID6zCoH2ArW39BxG4zempjn2VxYoMRGREyZszWQb++dwGoHmo5FHt6zvIrYBG0dA0H8ja9VkZkjFwtYTGHU1ooPzUfJK4O4VBayV8LqZibyZQR+GrmyQc0aagUY7J/fe4A2PJyI4DbkeZ7GX1ELj0ciDz4urQSzUc8l/T3aU3X+FuJItjgYtMLPmqcjA5uifDCtutE8Z9L2gSpanqUdvLSOozuxPho/KNl+2YlF7fXqPW3LnRf5mHD+NbOff306pvKlHJOb2Vmth+HBQ1XDzt/Cy5+sfwS3E0Vmh6UTq/NtkUXxwH10BDMF7FMVlQ4zdHQvyZ0= - - secure: rKDoy9IYYYy0fYBs4+9mwuBVq/TcxfFwMfE0ywYWhUUdgzrUYSJAwpoe/96EQ4YmESUefwC2nDNq4G3XzJKYOWf83PaIveb9Z//zmMrCQXjDuDBDLpwV3sXSh7evXiVDohJz4ogBCeMRUCMKYsyKBM9yWfa/iu+yI92dbphpK9peOKW6yBc0uspJlln4swN3GS2WT9LVuPY2Azv9U2UqrXufOPDKG/qEb/Vrn4yZ2lR/50r2k45e9nSvDoByvr10V8ubM5Zc0iP0vBuAUVRdByv6N53Q4gaBGapY6SxhIjIPC/h0rNnuT9EXp7MWaPT5FmBxLt9wnyleT9QhZJnFyaBYqFgcz/DKifYQkryY4M5dLMo/Rt3yATyAy8Y0df1TOoV2dKdqwOOwQ8bXB1wDfyrGxmQj9HY4Ffnphx3wPE1a+Sjuh+S5Epm7XJbPx5pZJqNO2hd4sTbk0Xp3gpPbihny2r/jtNwHl0wpFCfOM68RNrsVRlIwG3UhzbZvblbQ/M/mmWCdgzINjt07I2SGCJxfKG0e98Q49SKUoDoOgQTTRDqTC9IgOEDxyfAkT0Vr6BtlP88Nsgnf6kmboyigBrRAiaDQGTxn3SP6LnQI3CeopaRDYvFZe/rTwPXE9XlKoTn9FTWnAqF3MuWaLslDcDKYEh7OaYJjF01piu6g4Nc= - - secure: 
qCCk7HIEnOph2q8mQ55MKS2MM0RSpCbwDZx7csF6NHRr5khVRyhg2r8jN0iUW+peoAChRYV91YOnl5v8K49O38IEQpzgADixiLu4VPFcYddwKrtTJF+AGvFGzBKtqDksRuUTqfJ+PdxGnO9iNkS0MFzF1ImSQGp1QfkegC8wSrZF8svAedjNOC9XV+FX0tTyj14eTSy3KUYafIyuhjG+nSjhlQxAI1Tq4EClcTZOzAIYNhkeZ4Gcu1nHPQMTQT5AQgRAhG8i7rNKfghqX8OccKNWUhvFB3eOFFf4dlb02IA2L/b8Fl4NnZpyAWcwF+CBZrzQoFARBE1xIvGfaNa9i6noyrpJ/g+0g7EyKgTsixaQInBmZ7ECVpQkSO+/3leWfwssZs7H4cqy2HeXH6dkE+JUeI0WDjYV7YwdVNoFm8wXszDu+MCQTGXJ4moO4F/jMvY4w+tNo8ISJiNZ/+uQaIlPaijCdwu9FPvAY59lJXORGVHd1Fq2pKkGkNjQVHtu9BH7ufO1fX5a6FtYbclMwm7w9BE5jnJNoP+y8Yq0bVwbGONSUFTyMWCbSCYDsyUPzmaZLkFpZPbnJua5y9c1x0/OYijNizBW0UVQDZauortsTPzwYlZ1J7TywVtpUEoI8CGuUb2QEWh+O/IwrogtiKvFtPrrYakIwV/lr7mO294= + - GH_REF=github.com/kubernetes/ingress-nginx + - secure: LIS2XpZufWTcJ53jiRsSZy2Gi1EUJ1XmLg7z3f2ZHeMnyG2Jhk3GW4vod1FNru+PY4PWgddLdCdIl+jqOYXndFlbdAWF3/Oy5fEkYLXdYV7tdlHcPWDkqNFrfiyZ4guChN+b2Nk6FqU7o5fsZAIR7VAbgqNRF5XMo9Mhn/vhDCQRcnbXy7uq7JTrYUkqDbQoyYvT6b480GCY5gags1zp/xZfPDNZEe936o8i5IPTyiykRyNOXN/AH6kd3pR5e1xYgcvJ9KpSVPghcwFE7kJ4fOVMRhRG5ML+IyML+xD0jX43EMNoqRKZ/HS42kIMCInFbJEcxVde7DPNBZ7Y3GAqh7HO6qrE70Dn3ha6DID6zCoH2ArW39BxG4zempjn2VxYoMRGREyZszWQb++dwGoHmo5FHt6zvIrYBG0dA0H8ja9VkZkjFwtYTGHU1ooPzUfJK4O4VBayV8LqZibyZQR+GrmyQc0aagUY7J/fe4A2PJyI4DbkeZ7GX1ELj0ciDz4urQSzUc8l/T3aU3X+FuJItjgYtMLPmqcjA5uifDCtutE8Z9L2gSpanqUdvLSOozuxPho/KNl+2YlF7fXqPW3LnRf5mHD+NbOff306pvKlHJOb2Vmth+HBQ1XDzt/Cy5+sfwS3E0Vmh6UTq/NtkUXxwH10BDMF7FMVlQ4zdHQvyZ0= + - secure: rKDoy9IYYYy0fYBs4+9mwuBVq/TcxfFwMfE0ywYWhUUdgzrUYSJAwpoe/96EQ4YmESUefwC2nDNq4G3XzJKYOWf83PaIveb9Z//zmMrCQXjDuDBDLpwV3sXSh7evXiVDohJz4ogBCeMRUCMKYsyKBM9yWfa/iu+yI92dbphpK9peOKW6yBc0uspJlln4swN3GS2WT9LVuPY2Azv9U2UqrXufOPDKG/qEb/Vrn4yZ2lR/50r2k45e9nSvDoByvr10V8ubM5Zc0iP0vBuAUVRdByv6N53Q4gaBGapY6SxhIjIPC/h0rNnuT9EXp7MWaPT5FmBxLt9wnyleT9QhZJnFyaBYqFgcz/DKifYQkryY4M5dLMo/Rt3yATyAy8Y0df1TOoV2dKdqwOOwQ8bXB1wDfyrGxmQj9HY4Ffnphx3wPE1a+Sjuh+S5Epm7XJbPx5pZJqNO2hd4sTbk0Xp3gpPbihny2r/jtNwHl0wpFCfOM68RNrsVRlIwG3UhzbZvblbQ/M/mmWCdgzINjt07I2SGCJxfKG0e98Q49SKUoDoOgQTTRDqTC9IgOEDxyfAkT0Vr6BtlP88Nsgnf6kmboyigBrRAiaDQGTxn3SP6LnQI3CeopaRDYvFZe/rTwPXE9XlKoTn9FTWnAqF3MuWaLslDcDKYEh7OaYJjF01piu6g4Nc= + - secure: qCCk7HIEnOph2q8mQ55MKS2MM0RSpCbwDZx7csF6NHRr5khVRyhg2r8jN0iUW+peoAChRYV91YOnl5v8K49O38IEQpzgADixiLu4VPFcYddwKrtTJF+AGvFGzBKtqDksRuUTqfJ+PdxGnO9iNkS0MFzF1ImSQGp1QfkegC8wSrZF8svAedjNOC9XV+FX0tTyj14eTSy3KUYafIyuhjG+nSjhlQxAI1Tq4EClcTZOzAIYNhkeZ4Gcu1nHPQMTQT5AQgRAhG8i7rNKfghqX8OccKNWUhvFB3eOFFf4dlb02IA2L/b8Fl4NnZpyAWcwF+CBZrzQoFARBE1xIvGfaNa9i6noyrpJ/g+0g7EyKgTsixaQInBmZ7ECVpQkSO+/3leWfwssZs7H4cqy2HeXH6dkE+JUeI0WDjYV7YwdVNoFm8wXszDu+MCQTGXJ4moO4F/jMvY4w+tNo8ISJiNZ/+uQaIlPaijCdwu9FPvAY59lJXORGVHd1Fq2pKkGkNjQVHtu9BH7ufO1fX5a6FtYbclMwm7w9BE5jnJNoP+y8Yq0bVwbGONSUFTyMWCbSCYDsyUPzmaZLkFpZPbnJua5y9c1x0/OYijNizBW0UVQDZauortsTPzwYlZ1J7TywVtpUEoI8CGuUb2QEWh+O/IwrogtiKvFtPrrYakIwV/lr7mO294= + - secure: ZZlcwdr4X2ZeIuA4f5wiT04qNCpSiNQb9d3dITG7MdtxIpiC1mi9rUFAkMDDlNjKumHO82O/a/X4RYKjXny7eixeHl5lgQ++IV9APwvWfsCiREFhiQFspfL+j0d9sZ5I4pfyPC671984We1T4G+ltuMcN3nQdPm3mP4xPT3h0IBQ9iAHonKck0TdLieNZ47vPPB8C8oxbx5NpdW8aSfQJGo3bFGiXNxWWFZ4P7BsMBDrBZaXuh0rAml/0nCJBGohgSqC8h/UObBOHeehEWnF1zzfQPRezHwVkUaMf2+xQtLGhB5rPjFhBKX0C/JZeqDgHEQ0auC2bLbfG5QCYQauy7jCq5kc6XPT7xFxCUd/sS7Wu2gg6KcgFeTE+Rnn4KWFZx2jMP2EPQYP2+LrM/VbfY1HW4QkpIkPVSFBatciuePUnIkEX6+jVM+GEZOhOOEqZ89zwjsGpa2GkFAJrwX/dphXXtn6oS20mLbu1kqocWTbGUJl/fYztTxCdOt/NoH/hiQMxy+TOGFF3Dx85MJiMUOlgk/NbPqUwBn5RbuD71L69vFZZLpU09V4PuablWW8ACQxgp8BMeqLhaLRn/I3r0ntRc8AdQ1xubPlrVWO9DDbhGfj44YPNoLUAC/7QHkRyCbP98Yv2FTXrJFcx9isA2viFx2UxzTsvXcAKHbCSAw= jobs: include: - - stage: 
Static Check - script: - - go get github.com/golang/lint/golint - - make fmt lint vet - - stage: Coverage - before_script: - # start minikube - - test/e2e/up.sh - script: - - go get github.com/mattn/goveralls - - go get github.com/modocache/gover - - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover;fi - - if ! go get github.com/jteeuwen/go-bindata/...; then github.com/jteeuwen/go-bindata/...;fi - - make cover - - stage: e2e - before_script: - - if ! go get github.com/jteeuwen/go-bindata/...; then github.com/jteeuwen/go-bindata/...;fi - - make e2e-image - - test/e2e/up.sh - - test/e2e/wait-for-nginx.sh - script: - - make e2e-test - # split builds to avoid job timeouts - - stage: publish amd64 - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx - script: - - ARCH=amd64 .travis/publish.sh - - stage: publish arm - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx - script: - - ARCH=arm .travis/publish.sh - - stage: publish arm64 - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx - script: - - ARCH=arm64 .travis/publish.sh - - stage: publish ppc64le - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx - script: - - ARCH=ppc64le .travis/publish.sh - - stage: publish s390x - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx - script: - - ARCH=s390x .travis/publish.sh + - stage: Publish docs + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "docs" + script: + - .travis/publish-docs.sh diff --git a/.travis/common.sh b/.travis/common.sh index 8c658e39a5..3417f05179 100755 --- a/.travis/common.sh +++ b/.travis/common.sh @@ -82,7 +82,7 @@ fi function docker_tag_exists() { TAG=${2//\"/} IMAGES=$(curl -s -H "Authorization: Bearer ${QUAY_PASSWORD}" https://quay.io/api/v1/repository/$1-$3/image/ | jq '.images | sort_by(.sort_index) | .[] .tags | select(.[] !=null) | .[0]' | sed s/\"//g) - if echo "$IMAGES" | grep -q "$TAG" ; then + if echo "$IMAGES" | grep -q -P "(^|\s)$TAG(?=\s|$)" ; then return 0 fi diff --git a/.travis/ingress-controller.sh b/.travis/ingress-controller.sh deleted file mode 100755 index 8af37b9948..0000000000 --- a/.travis/ingress-controller.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -source $DIR/common.sh - -IMAGE=$(make -s -C $DIR/../ image-info) - -if docker_tag_exists "kubernetes-ingress-controller/nginx-ingress-controller" $(echo $IMAGE | jq .tag) "$ARCH"; then - echo "Image already published" - exit 0 -fi - -echo "building nginx-ingress-controller-$ARCH image..." 
-make -C $DIR/../ sub-container-$ARCH -make -C $DIR/../ sub-push-$ARCH diff --git a/.travis/nginx.sh b/.travis/nginx.sh deleted file mode 100755 index bb5966b250..0000000000 --- a/.travis/nginx.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -source $DIR/common.sh - -IMAGE=$(make -s -C $DIR/../images/nginx image-info) - -if docker_tag_exists "kubernetes-ingress-controller/nginx" $(echo $IMAGE | jq .tag) "$ARCH"; then - echo "Image already published" - exit 0 -fi - -echo "building nginx-$ARCH image..." -make -C $DIR/../images/nginx sub-container-$ARCH -make -C $DIR/../images/nginx sub-push-$ARCH diff --git a/.travis/publish-docs.sh b/.travis/publish-docs.sh new file mode 100755 index 0000000000..3511fbe168 --- /dev/null +++ b/.travis/publish-docs.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o pipefail + +if ! [ -z $DEBUG ]; then + set -x +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +if [ "$COMPONENT" != "docs" ]; then + echo "This task runs only to publish docs" + exit 0 +fi + +make -C ${DIR}/.. build-docs + +git config --global user.email "travis@travis-ci.com" +git config --global user.name "Travis Bot" + +git clone --branch=gh-pages --depth=1 https://${GH_REF} ${DIR}/gh-pages +cd ${DIR}/gh-pages + +git rm -r . + +cp -r ${DIR}/../site/* . + +git add . +git commit -m "Deploy GitHub Pages" +git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" gh-pages > /dev/null 2>&1 diff --git a/.travis/publish.sh b/.travis/publish.sh deleted file mode 100755 index a06f105dee..0000000000 --- a/.travis/publish.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -source $DIR/common.sh - -echo "Login to quay.io..." -docker login --username=$QUAY_USERNAME --password=$QUAY_PASSWORD quay.io >/dev/null 2>&1 - -case "$COMPONENT" in -"ingress-controller") - $DIR/ingress-controller.sh - ;; -"nginx") - $DIR/nginx.sh - ;; -*) - echo "Invalid option in environment variable COMPONENT" - exit 1 - ;; -esac diff --git a/.travis/release-from-travis.sh b/.travis/release-from-travis.sh deleted file mode 100755 index 539bb291d2..0000000000 --- a/.travis/release-from-travis.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if ! [ -z $DEBUG ]; then - set -x -fi - -set -o errexit -set -o pipefail - -if [ "$TRAVIS_CI_TOKEN" == "" ]; -then - echo "Environment variable TRAVIS_CI_TOKEN is missing."; - exit 1; -fi - -function publish() { - -body=$(cat < Don't reload nginx when L4 endpoints changed +- [X] [#3696](https://github.com/kubernetes/ingress-nginx/pull/3696) Apply annotations to default location +- [X] [#3698](https://github.com/kubernetes/ingress-nginx/pull/3698) Fix --disable-catch-all +- [X] [#3702](https://github.com/kubernetes/ingress-nginx/pull/3702) Add params for access log +- [X] [#3704](https://github.com/kubernetes/ingress-nginx/pull/3704) make sure dev-env forces context to be minikube +- [X] [#3728](https://github.com/kubernetes/ingress-nginx/pull/3728) Fix flaky test +- [X] [#3730](https://github.com/kubernetes/ingress-nginx/pull/3730) Changes CustomHTTPErrors annotation to use custom default backend +- [X] [#3734](https://github.com/kubernetes/ingress-nginx/pull/3734) remove old unused lua dicts +- [X] [#3736](https://github.com/kubernetes/ingress-nginx/pull/3736) do not unnecessarily log +- [X] [#3737](https://github.com/kubernetes/ingress-nginx/pull/3737) Adjust probe timeouts +- [X] [#3739](https://github.com/kubernetes/ingress-nginx/pull/3739) dont log unnecessarily +- [X] [#3740](https://github.com/kubernetes/ingress-nginx/pull/3740) Fix ingress updating for session-cookie-* annotation changes +- [X] [#3747](https://github.com/kubernetes/ingress-nginx/pull/3747) Update nginx and modules +- [X] [#3748](https://github.com/kubernetes/ingress-nginx/pull/3748) Update nginx image +- [X] [#3749](https://github.com/kubernetes/ingress-nginx/pull/3749) Enhance Unit Tests for Annotations +- [X] [#3750](https://github.com/kubernetes/ingress-nginx/pull/3750) Update go dependencies +- [X] [#3751](https://github.com/kubernetes/ingress-nginx/pull/3751) Parse environment variables in OpenTracing configuration +- [X] [#3756](https://github.com/kubernetes/ingress-nginx/pull/3756) Create custom annotation for satisfy "value" +- [X] [#3757](https://github.com/kubernetes/ingress-nginx/pull/3757) Add mention of secure-backends to backend-protocol docs +- [X] 
[#3764](https://github.com/kubernetes/ingress-nginx/pull/3764) delete confusing CustomErrors attribute to make things more explicit +- [X] [#3765](https://github.com/kubernetes/ingress-nginx/pull/3765) simplify customhttperrors e2e test and add regression test and fix a bug +- [X] [#3766](https://github.com/kubernetes/ingress-nginx/pull/3766) Support Opentracing with Datadog - part 2 +- [X] [#3767](https://github.com/kubernetes/ingress-nginx/pull/3767) Support Opentracing with Datadog - part 1 +- [X] [#3771](https://github.com/kubernetes/ingress-nginx/pull/3771) Do not log unnecessarily +- [X] [#3772](https://github.com/kubernetes/ingress-nginx/pull/3772) Fix dashboard link [skip ci] +- [X] [#3775](https://github.com/kubernetes/ingress-nginx/pull/3775) Fix DNS lookup failures in L4 services +- [X] [#3779](https://github.com/kubernetes/ingress-nginx/pull/3779) Add kubectl plugin +- [X] [#3780](https://github.com/kubernetes/ingress-nginx/pull/3780) Enable access log for default backend +- [X] [#3781](https://github.com/kubernetes/ingress-nginx/pull/3781) feat: configurable proxy buffers number +- [X] [#3782](https://github.com/kubernetes/ingress-nginx/pull/3782) Lua bridge tracer +- [X] [#3784](https://github.com/kubernetes/ingress-nginx/pull/3784) use correct host for jaeger-collector-host in docs +- [X] [#3785](https://github.com/kubernetes/ingress-nginx/pull/3785) use latest base nginx image +- [X] [#3787](https://github.com/kubernetes/ingress-nginx/pull/3787) Use UsePortInRedirects only if enabled +- [X] [#3791](https://github.com/kubernetes/ingress-nginx/pull/3791) - remove annotations in nginxcontroller struct +- [X] [#3792](https://github.com/kubernetes/ingress-nginx/pull/3792) dont restart minikube when it is already running +- [X] [#3793](https://github.com/kubernetes/ingress-nginx/pull/3793) Update mergo dependency +- [X] [#3794](https://github.com/kubernetes/ingress-nginx/pull/3794) use use-context that actually changes the context +- [X] [#3795](https://github.com/kubernetes/ingress-nginx/pull/3795) do not warn when optional annotations arent set +- [X] [#3799](https://github.com/kubernetes/ingress-nginx/pull/3799) Add /dbg certs command +- [X] [#3800](https://github.com/kubernetes/ingress-nginx/pull/3800) Refactor e2e +- [X] [#3809](https://github.com/kubernetes/ingress-nginx/pull/3809) Upgrade openresty/lua-resty-balancer +- [X] [#3810](https://github.com/kubernetes/ingress-nginx/pull/3810) Update nginx image +- [X] [#3811](https://github.com/kubernetes/ingress-nginx/pull/3811) Fix e2e tests +- [X] [#3812](https://github.com/kubernetes/ingress-nginx/pull/3812) Removes unused const from customhttperrors e2e test +- [X] [#3813](https://github.com/kubernetes/ingress-nginx/pull/3813) Prevent dep from vendoring grpc-fortune-teller dependencies +- [X] [#3819](https://github.com/kubernetes/ingress-nginx/pull/3819) Fix e2e test in osx +- [X] [#3820](https://github.com/kubernetes/ingress-nginx/pull/3820) Update nginx image +- [X] [#3821](https://github.com/kubernetes/ingress-nginx/pull/3821) Update nginx to 1.15.9 +- [X] [#3822](https://github.com/kubernetes/ingress-nginx/pull/3822) Set default for satisfy annotation to nothing + +_Documentation:_ + +- [X] [#3680](https://github.com/kubernetes/ingress-nginx/pull/3680) mention rewrite-target 
change for 0.22.0
+- [X] [#3693](https://github.com/kubernetes/ingress-nginx/pull/3693) Correcting links for gRPC Fortune Teller app
+- [X] [#3701](https://github.com/kubernetes/ingress-nginx/pull/3701) Update usage documentation for default-backend annotation
+- [X] [#3705](https://github.com/kubernetes/ingress-nginx/pull/3705) Increase Unit Test Coverage for Templates
+- [X] [#3708](https://github.com/kubernetes/ingress-nginx/pull/3708) Update OWNERS
+- [X] [#3731](https://github.com/kubernetes/ingress-nginx/pull/3731) Update a doc example that uses rewrite-target
+
+_Deprecations:_
+
+- The annotation `session-cookie-hash` is deprecated and will be removed in 0.24.
+- The flag `--force-namespace-isolation` is deprecated and will be removed in 0.24. It is being replaced by `--watch-namespace`.
+
+### 0.22.0
+
+**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.22.0`
+
+_New Features:_
+
+- NGINX 1.15.8
+- New balancer implementation: consistent hash subset
+- Adds support for the HTTP2 Push Preload annotation
+- Allow disabling NGINX Prometheus metrics
+- New --disable-catch-all flag to ignore catch-all ingresses
+- Add flag --metrics-per-host to make per-host metrics optional
+
+_Breaking changes:_
+
+- The annotation `nginx.ingress.kubernetes.io/rewrite-target` has changed and will not behave as expected if you don't update it.
+
+  Refer to [https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target](https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target) on how to change it.
+
+  Refer to [https://github.com/kubernetes/ingress-nginx/pull/3174#issuecomment-455665710](https://github.com/kubernetes/ingress-nginx/pull/3174#issuecomment-455665710) on how to do a seamless migration. A minimal migrated example is sketched below.
+
+- The annotations `nginx.ingress.kubernetes.io/add-base-url` and `nginx.ingress.kubernetes.io/base-url-scheme` were removed.
+
+  Please check issue [#3174](https://github.com/kubernetes/ingress-nginx/pull/3174) for details.
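+
+A minimal sketch of the new capture-group behavior (host, service, and backend names here are placeholders, not part of the release): the annotation now refers to capture groups of the `path` regex, so a request to `/something/foo` is rewritten to `/foo`:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: rewrite
+  annotations:
+    # $2 refers to the second capture group of the path regex below
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  rules:
+  - host: rewrite.example.com
+    http:
+      paths:
+      - path: /something(/|$)(.*)
+        backend:
+          serviceName: http-svc
+          servicePort: 80
+```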
+ +- By default do not trust any client to extract true client IP address from X-Forwarded-For header using realip module (`use-forwarded-headers: "false"`) + +_Changes:_ + +- [X] [#3174](https://github.com/kubernetes/ingress-nginx/pull/3174) Generalize Rewrite Block Creation and Deprecate AddBaseUrl (not backwards compatible) +- [X] [#3240](https://github.com/kubernetes/ingress-nginx/pull/3240) Adds support for HTTP2 Push Preload annotation +- [X] [#3333](https://github.com/kubernetes/ingress-nginx/pull/3333) breaking change: by default do not trust any client +- [X] [#3342](https://github.com/kubernetes/ingress-nginx/pull/3342) Allow privilege escalation +- [X] [#3363](https://github.com/kubernetes/ingress-nginx/pull/3363) Document for cookie expires annotation +- [X] [#3396](https://github.com/kubernetes/ingress-nginx/pull/3396) New balancer implementation: consistent hash subset +- [X] [#3446](https://github.com/kubernetes/ingress-nginx/pull/3446) add more testing for mergeAlternativeBackends +- [X] [#3453](https://github.com/kubernetes/ingress-nginx/pull/3453) Monitor fixes +- [X] [#3455](https://github.com/kubernetes/ingress-nginx/pull/3455) Watch controller Pods and make then available in k8sStore +- [X] [#3465](https://github.com/kubernetes/ingress-nginx/pull/3465) Bump nginx-opentracing for gRPC support +- [X] [#3467](https://github.com/kubernetes/ingress-nginx/pull/3467) store ewma stats per backend +- [X] [#3470](https://github.com/kubernetes/ingress-nginx/pull/3470) Use opentracing_grpc_propagate_context when necessary +- [X] [#3474](https://github.com/kubernetes/ingress-nginx/pull/3474) Improve parsing of annotations and use of Ingress wrapper +- [X] [#3476](https://github.com/kubernetes/ingress-nginx/pull/3476) Fix nginx directory permissions +- [X] [#3477](https://github.com/kubernetes/ingress-nginx/pull/3477) clarify canary ingress +- [X] [#3478](https://github.com/kubernetes/ingress-nginx/pull/3478) delete unused buildLoadBalancingConfig +- [X] [#3487](https://github.com/kubernetes/ingress-nginx/pull/3487) dynamic certificate mode should support widlcard hosts +- [X] [#3488](https://github.com/kubernetes/ingress-nginx/pull/3488) Add probes to deployments used in e2e tests +- [X] [#3492](https://github.com/kubernetes/ingress-nginx/pull/3492) Fix data size validations +- [X] [#3494](https://github.com/kubernetes/ingress-nginx/pull/3494) Since dynamic mode only checking for 'return 503' is not valid anymore +- [X] [#3495](https://github.com/kubernetes/ingress-nginx/pull/3495) Adjust default timeout for e2e tests +- [X] [#3497](https://github.com/kubernetes/ingress-nginx/pull/3497) Wait for the right number of endpoints +- [X] [#3498](https://github.com/kubernetes/ingress-nginx/pull/3498) Update godeps +- [X] [#3501](https://github.com/kubernetes/ingress-nginx/pull/3501) be consistent with what Nginx supports +- [X] [#3503](https://github.com/kubernetes/ingress-nginx/pull/3503) compare error with error types from k8s.io/apimachinery/pkg/api/errors +- [X] [#3504](https://github.com/kubernetes/ingress-nginx/pull/3504) fix an ewma unit test +- [X] [#3505](https://github.com/kubernetes/ingress-nginx/pull/3505) Update lua configuration_data when number of controller pod change +- [X] [#3507](https://github.com/kubernetes/ingress-nginx/pull/3507) Remove temporal 
configuration file after a while +- [X] [#3508](https://github.com/kubernetes/ingress-nginx/pull/3508) Update nginx to 1.15.7 +- [X] [#3509](https://github.com/kubernetes/ingress-nginx/pull/3509) [1759] Ingress affinity session cookie with Secure flag for HTTPS +- [X] [#3512](https://github.com/kubernetes/ingress-nginx/pull/3512) Allow to disable NGINX metrics +- [X] [#3518](https://github.com/kubernetes/ingress-nginx/pull/3518) Fix log output format +- [X] [#3521](https://github.com/kubernetes/ingress-nginx/pull/3521) Fix a bug with Canary becoming main server +- [X] [#3522](https://github.com/kubernetes/ingress-nginx/pull/3522) {tcp,udp}-services cm appear twice +- [X] [#3525](https://github.com/kubernetes/ingress-nginx/pull/3525) make canary ingresses independent of the order they were applied +- [X] [#3530](https://github.com/kubernetes/ingress-nginx/pull/3530) Update nginx image +- [X] [#3532](https://github.com/kubernetes/ingress-nginx/pull/3532) Ignore updates of ingresses with invalid class +- [X] [#3536](https://github.com/kubernetes/ingress-nginx/pull/3536) Replace dockerfile entrypoint +- [X] [#3548](https://github.com/kubernetes/ingress-nginx/pull/3548) e2e test to ensure graceful shutdown does not lose requests +- [X] [#3551](https://github.com/kubernetes/ingress-nginx/pull/3551) Fix --enable-dynamic-certificates for nested subdomain +- [X] [#3553](https://github.com/kubernetes/ingress-nginx/pull/3553) handle_error_when_executing_diff +- [X] [#3562](https://github.com/kubernetes/ingress-nginx/pull/3562) Rename nginx.yaml to nginx.json +- [X] [#3566](https://github.com/kubernetes/ingress-nginx/pull/3566) Add Unit Tests for getIngressInformation +- [X] [#3569](https://github.com/kubernetes/ingress-nginx/pull/3569) fix status updated: make sure ingress.status is copied +- [X] [#3573](https://github.com/kubernetes/ingress-nginx/pull/3573) Update Certificate Generation Docs to not use MD5 +- [X] [#3581](https://github.com/kubernetes/ingress-nginx/pull/3581) lua randomseed per worker +- [X] [#3582](https://github.com/kubernetes/ingress-nginx/pull/3582) Sort ingresses by creation timestamp +- [X] [#3584](https://github.com/kubernetes/ingress-nginx/pull/3584) Update go to 1.11.4 +- [X] [#3586](https://github.com/kubernetes/ingress-nginx/pull/3586) Add --disable-catch-all option to disable catch-all server +- [X] [#3587](https://github.com/kubernetes/ingress-nginx/pull/3587) adjust dind istallation +- [X] [#3594](https://github.com/kubernetes/ingress-nginx/pull/3594) Add a flag to make per-host metrics optional +- [X] [#3596](https://github.com/kubernetes/ingress-nginx/pull/3596) Fix proxy_host variable configuration +- [X] [#3601](https://github.com/kubernetes/ingress-nginx/pull/3601) Update nginx to 1.15.8 +- [X] [#3602](https://github.com/kubernetes/ingress-nginx/pull/3602) Update nginx image +- [X] [#3604](https://github.com/kubernetes/ingress-nginx/pull/3604) Add an option to automatically set worker_connections based on worker_rlimit_nofile +- [X] [#3615](https://github.com/kubernetes/ingress-nginx/pull/3615) Pass k8s `Service` data through to the TCP balancer script. 
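+
+Several of the canary fixes above (#3521, #3525) concern the canary annotations introduced in 0.21.0; as a hedged usage sketch (the header and cookie names are illustrative only), the canary Ingress carries annotations such as:
+
+```yaml
+metadata:
+  annotations:
+    nginx.ingress.kubernetes.io/canary: "true"
+    # route roughly 10% of traffic to the canary backend
+    nginx.ingress.kubernetes.io/canary-weight: "10"
+    # alternatively, route by request header or cookie:
+    # nginx.ingress.kubernetes.io/canary-by-header: "X-Canary"
+    # nginx.ingress.kubernetes.io/canary-by-cookie: "use-canary"
+```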
+- [X] [#3620](https://github.com/kubernetes/ingress-nginx/pull/3620) Added server alias to metrics +- [X] [#3624](https://github.com/kubernetes/ingress-nginx/pull/3624) Update nginx to fix geoip database deprecation +- [X] [#3625](https://github.com/kubernetes/ingress-nginx/pull/3625) Update nginx image +- [X] [#3633](https://github.com/kubernetes/ingress-nginx/pull/3633) Fix a bug in Ingress update handler +- [X] [#3634](https://github.com/kubernetes/ingress-nginx/pull/3634) canary by cookie should support hypen in cookie name +- [X] [#3635](https://github.com/kubernetes/ingress-nginx/pull/3635) Fix duplicate alternative backend merging +- [X] [#3637](https://github.com/kubernetes/ingress-nginx/pull/3637) Add support for redirect https to https (from-to-www-redirect) +- [X] [#3640](https://github.com/kubernetes/ingress-nginx/pull/3640) add limit connection status code +- [X] [#3641](https://github.com/kubernetes/ingress-nginx/pull/3641) Replace deprecated apiVersion in deploy folder +- [X] [#3643](https://github.com/kubernetes/ingress-nginx/pull/3643) Update nginx +- [X] [#3644](https://github.com/kubernetes/ingress-nginx/pull/3644) Update nginx image +- [X] [#3648](https://github.com/kubernetes/ingress-nginx/pull/3648) Remove stickyness cookie domain from Lua balancer to match old behavior +- [X] [#3649](https://github.com/kubernetes/ingress-nginx/pull/3649) Empty access_by_lua_block breaks satisfy any +- [X] [#3655](https://github.com/kubernetes/ingress-nginx/pull/3655) Remove flag sort-backends +- [X] [#3656](https://github.com/kubernetes/ingress-nginx/pull/3656) Change default value of flag for ssl chain completion +- [X] [#3660](https://github.com/kubernetes/ingress-nginx/pull/3660) Revert max-worker-connections default value +- [X] [#3664](https://github.com/kubernetes/ingress-nginx/pull/3664) Fix invalid validation creating prometheus valid host values + +_Documentation:_ + +- [X] [#3513](https://github.com/kubernetes/ingress-nginx/pull/3513) Revert removal of TCP and UDP support configmaps in mandatroy manifest +- [X] [#3456](https://github.com/kubernetes/ingress-nginx/pull/3456) Revert TCP/UDP documentation removal and links +- [X] [#3482](https://github.com/kubernetes/ingress-nginx/pull/3482) Annotations doc links: minor fixes and unification +- [X] [#3491](https://github.com/kubernetes/ingress-nginx/pull/3491) Update example to use latest Dashboard version. 
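+
+For reference alongside the TCP/UDP ConfigMap support restored in #3513 and #3456 above, exposing a TCP service goes through the `tcp-services` ConfigMap; a minimal sketch (namespace, service, and ports are placeholders):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: tcp-services
+  namespace: ingress-nginx
+data:
+  # external port 9000 -> port 8080 of service example-go in namespace default
+  "9000": "default/example-go:8080"
+```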
+- [X] [#3510](https://github.com/kubernetes/ingress-nginx/pull/3510) Update mkdocs [skip ci] +- [X] [#3516](https://github.com/kubernetes/ingress-nginx/pull/3516) Fix error in configmap yaml definition +- [X] [#3575](https://github.com/kubernetes/ingress-nginx/pull/3575) Add documentation for spec.rules.host format +- [X] [#3577](https://github.com/kubernetes/ingress-nginx/pull/3577) Add standard labels to namespace specs +- [X] [#3592](https://github.com/kubernetes/ingress-nginx/pull/3592) Add inside the User Guide documentation section a basic usage section and example +- [X] [#3605](https://github.com/kubernetes/ingress-nginx/pull/3605) Fix CLA URLs +- [X] [#3627](https://github.com/kubernetes/ingress-nginx/pull/3627) Typo: docs/examples/rewrite/README.md +- [X] [#3632](https://github.com/kubernetes/ingress-nginx/pull/3632) Fixed: error parsing with-rbac.yaml: error converting YAML to JSON + +### 0.21.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0` + +_New Features:_ + +- NGINX 1.15.6 with fixes for vulnerabilities in HTTP/2 ([CVE-2018-16843](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16843), [CVE-2018-16844](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-16844)) +- Support for TLSv1.3. Disabled by default. Use [ssl-protocols](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#ssl-protocols) `ssl-protocols: TLSv1.3 TLSv1.2` +- New annotation for [canary deployments](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#canary) +- Support for configuration snippets when the authentication annotation is used +- Support for custom ModSecurity configuration +- LUA upstream configuration for TCP and UDP services + +_Changes:_ + +- [X] [#3156](https://github.com/kubernetes/ingress-nginx/pull/3156) [404-server] Removes 404 server +- [X] [#3170](https://github.com/kubernetes/ingress-nginx/pull/3170) Move mainSnippet before events to fix load_module issue. 
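+
+Tying into the TLSv1.3 feature noted above, the documented value goes into the controller's ConfigMap; a minimal sketch, assuming the ConfigMap name matches the one passed to the controller via `--configmap`:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-configuration
+  namespace: ingress-nginx
+data:
+  # TLSv1.3 is disabled by default and must be opted into explicitly
+  ssl-protocols: "TLSv1.3 TLSv1.2"
+```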
+- [X] [#3187](https://github.com/kubernetes/ingress-nginx/pull/3187) UPT: annotation enhancement for resty-lua-waf +- [X] [#3190](https://github.com/kubernetes/ingress-nginx/pull/3190) Refactor e2e Tests to use common helper +- [X] [#3193](https://github.com/kubernetes/ingress-nginx/pull/3193) Add E2E tests for HealthCheck +- [X] [#3194](https://github.com/kubernetes/ingress-nginx/pull/3194) Make literal $ character work in set $location_path +- [X] [#3195](https://github.com/kubernetes/ingress-nginx/pull/3195) Add e2e Tests for AuthTLS +- [X] [#3196](https://github.com/kubernetes/ingress-nginx/pull/3196) Remove default backend requirement +- [X] [#3197](https://github.com/kubernetes/ingress-nginx/pull/3197) Remove support for TCP and UDP services +- [X] [#3198](https://github.com/kubernetes/ingress-nginx/pull/3198) Only support dynamic configuration +- [X] [#3199](https://github.com/kubernetes/ingress-nginx/pull/3199) Remove duplication in files +- [X] [#3201](https://github.com/kubernetes/ingress-nginx/pull/3201) no data shows for config reloads charts when select to namespace or controller +- [X] [#3203](https://github.com/kubernetes/ingress-nginx/pull/3203) Remove annotations grpc-backend and secure-backend already deprecated +- [X] [#3204](https://github.com/kubernetes/ingress-nginx/pull/3204) Flags publish-service and publish-status-address are mutually exclusive +- [X] [#3205](https://github.com/kubernetes/ingress-nginx/pull/3205) Update OWNERS [skip ci] +- [X] [#3207](https://github.com/kubernetes/ingress-nginx/pull/3207) delete upstream healthcheck annotation +- [X] [#3209](https://github.com/kubernetes/ingress-nginx/pull/3209) Fix: update config map name +- [X] [#3212](https://github.com/kubernetes/ingress-nginx/pull/3212) Add some extra detail to the client cert auth example regarding potential gotcha +- [X] [#3213](https://github.com/kubernetes/ingress-nginx/pull/3213) Update deps +- [X] [#3214](https://github.com/kubernetes/ingress-nginx/pull/3214) Cleanup of nginx image +- [X] [#3219](https://github.com/kubernetes/ingress-nginx/pull/3219) Update nginx image +- [X] [#3222](https://github.com/kubernetes/ingress-nginx/pull/3222) Allow Ability to Configure Upstream Keepalive +- [X] [#3230](https://github.com/kubernetes/ingress-nginx/pull/3230) Retry initial backend configuration +- [X] [#3231](https://github.com/kubernetes/ingress-nginx/pull/3231) Improve dynamic lua configuration +- [X] [#3234](https://github.com/kubernetes/ingress-nginx/pull/3234) Added e2e tests for backend protocols +- [X] [#3247](https://github.com/kubernetes/ingress-nginx/pull/3247) Refactor probe url requests +- [X] [#3252](https://github.com/kubernetes/ingress-nginx/pull/3252) remove the command args of enable-dynamic-configuration +- [X] [#3257](https://github.com/kubernetes/ingress-nginx/pull/3257) Add e2e tests for upstream vhost +- [X] [#3260](https://github.com/kubernetes/ingress-nginx/pull/3260) fix logging calls +- [X] [#3261](https://github.com/kubernetes/ingress-nginx/pull/3261) Mount minikube volume to docker container +- [X] [#3265](https://github.com/kubernetes/ingress-nginx/pull/3265) Update kubeadm-dind-cluster +- [X] [#3266](https://github.com/kubernetes/ingress-nginx/pull/3266) fix two bugs with backend-protocol annotation +- [X] 
[#3267](https://github.com/kubernetes/ingress-nginx/pull/3267) Fix status update in case of connection errors +- [X] [#3270](https://github.com/kubernetes/ingress-nginx/pull/3270) Don't sort IngressStatus from each Goroutine(update for each ingress) +- [X] [#3277](https://github.com/kubernetes/ingress-nginx/pull/3277) Add e2e test for configuration snippet +- [X] [#3279](https://github.com/kubernetes/ingress-nginx/pull/3279) Fix usages of %q formatting for numbers (%d) +- [X] [#3280](https://github.com/kubernetes/ingress-nginx/pull/3280) Add e2e test for from-to-www-redirect +- [X] [#3281](https://github.com/kubernetes/ingress-nginx/pull/3281) Add e2e test for log +- [X] [#3285](https://github.com/kubernetes/ingress-nginx/pull/3285) Add health-check-timeout as command line argument +- [X] [#3286](https://github.com/kubernetes/ingress-nginx/pull/3286) fix bug with balancer.lua configuration +- [X] [#3295](https://github.com/kubernetes/ingress-nginx/pull/3295) Refactor EWMA to not use shared dictionaries +- [X] [#3296](https://github.com/kubernetes/ingress-nginx/pull/3296) Update nginx and add support for TLSv1.3 +- [X] [#3297](https://github.com/kubernetes/ingress-nginx/pull/3297) Add e2e test for force-ssl-redirect +- [X] [#3301](https://github.com/kubernetes/ingress-nginx/pull/3301) Add e2e tests for IP Whitelist +- [X] [#3302](https://github.com/kubernetes/ingress-nginx/pull/3302) Add e2e test for server snippet +- [X] [#3304](https://github.com/kubernetes/ingress-nginx/pull/3304) Update kubeadm-dind-cluster script +- [X] [#3305](https://github.com/kubernetes/ingress-nginx/pull/3305) Add e2e test for app-root +- [X] [#3306](https://github.com/kubernetes/ingress-nginx/pull/3306) Update e2e test to verify redirect code +- [X] [#3309](https://github.com/kubernetes/ingress-nginx/pull/3309) Customize ModSecurity to be used in Locations +- [X] [#3310](https://github.com/kubernetes/ingress-nginx/pull/3310) Fix geoip2 db files +- [X] [#3313](https://github.com/kubernetes/ingress-nginx/pull/3313) Support cookie expires +- [X] [#3320](https://github.com/kubernetes/ingress-nginx/pull/3320) Update nginx image and QEMU version +- [X] [#3321](https://github.com/kubernetes/ingress-nginx/pull/3321) Add configuration for geoip2 module +- [X] [#3322](https://github.com/kubernetes/ingress-nginx/pull/3322) Remove e2e boilerplate +- [X] [#3324](https://github.com/kubernetes/ingress-nginx/pull/3324) Fix sticky session +- [X] [#3325](https://github.com/kubernetes/ingress-nginx/pull/3325) Fix e2e tests +- [X] [#3328](https://github.com/kubernetes/ingress-nginx/pull/3328) Code linting +- [X] [#3332](https://github.com/kubernetes/ingress-nginx/pull/3332) Update build-single-manifest-sh,remove tcp-services-configmap.yaml and udp-services-configmap.yaml +- [X] [#3338](https://github.com/kubernetes/ingress-nginx/pull/3338) Avoid reloads when endpoints are not available +- [X] [#3341](https://github.com/kubernetes/ingress-nginx/pull/3341) Add canary annotation and alternative backends for traffic shaping +- [X] [#3343](https://github.com/kubernetes/ingress-nginx/pull/3343) Auth snippet +- [X] [#3344](https://github.com/kubernetes/ingress-nginx/pull/3344) Adds CustomHTTPErrors ingress annotation and test +- [X] [#3345](https://github.com/kubernetes/ingress-nginx/pull/3345) 
update annotation +- [X] [#3346](https://github.com/kubernetes/ingress-nginx/pull/3346) Add e2e test for session-cookie-hash +- [X] [#3347](https://github.com/kubernetes/ingress-nginx/pull/3347) Add e2e test for ssl-redirect +- [X] [#3348](https://github.com/kubernetes/ingress-nginx/pull/3348) Update cli-arguments.md. Remove tcp and udp, add health-check-timeout. +- [X] [#3353](https://github.com/kubernetes/ingress-nginx/pull/3353) Update nginx modules +- [X] [#3354](https://github.com/kubernetes/ingress-nginx/pull/3354) Update nginx image +- [X] [#3356](https://github.com/kubernetes/ingress-nginx/pull/3356) Download latest dep releases instead of fetching from HEAD +- [X] [#3357](https://github.com/kubernetes/ingress-nginx/pull/3357) Add missing modsecurity unicode.mapping file +- [X] [#3367](https://github.com/kubernetes/ingress-nginx/pull/3367) Remove reloads when there is no endpoints +- [X] [#3372](https://github.com/kubernetes/ingress-nginx/pull/3372) Add annotation for session affinity path +- [X] [#3373](https://github.com/kubernetes/ingress-nginx/pull/3373) Update nginx +- [X] [#3374](https://github.com/kubernetes/ingress-nginx/pull/3374) Revert removal of support for TCP and UDP services +- [X] [#3383](https://github.com/kubernetes/ingress-nginx/pull/3383) Only set cookies on paths that enable session affinity +- [X] [#3387](https://github.com/kubernetes/ingress-nginx/pull/3387) Modify the wrong function name +- [X] [#3390](https://github.com/kubernetes/ingress-nginx/pull/3390) Add e2e test for round robin load balancing +- [X] [#3400](https://github.com/kubernetes/ingress-nginx/pull/3400) Add Snippet for ModSecurity +- [X] [#3404](https://github.com/kubernetes/ingress-nginx/pull/3404) Update nginx image +- [X] [#3405](https://github.com/kubernetes/ingress-nginx/pull/3405) Prevent X-Forwarded-Proto forward during external auth subrequest +- [X] [#3406](https://github.com/kubernetes/ingress-nginx/pull/3406) Update nginx and e2e image +- [X] [#3407](https://github.com/kubernetes/ingress-nginx/pull/3407) Restructure load balance e2e tests and update round robin test +- [X] [#3408](https://github.com/kubernetes/ingress-nginx/pull/3408) Fix modsecurity configuration file location +- [X] [#3409](https://github.com/kubernetes/ingress-nginx/pull/3409) Convert isValidClientBodyBufferSize to something more generic +- [X] [#3410](https://github.com/kubernetes/ingress-nginx/pull/3410) fix logging calls +- [X] [#3415](https://github.com/kubernetes/ingress-nginx/pull/3415) bugfix: set canary attributes when initializing balancer +- [X] [#3417](https://github.com/kubernetes/ingress-nginx/pull/3417) bugfix: do not merge catch-all canary backends with itself +- [X] [#3421](https://github.com/kubernetes/ingress-nginx/pull/3421) Fix X-Forwarded-Proto typo +- [X] [#3424](https://github.com/kubernetes/ingress-nginx/pull/3424) Update nginx image +- [X] [#3425](https://github.com/kubernetes/ingress-nginx/pull/3425) Update nginx modules +- [X] [#3428](https://github.com/kubernetes/ingress-nginx/pull/3428) Set proxy_host variable to avoid using default value from proxy_pass +- [X] [#3437](https://github.com/kubernetes/ingress-nginx/pull/3437) Use struct to pack Ingress and its annotations +- [X] [#3441](https://github.com/kubernetes/ingress-nginx/pull/3441) Match 
buffer +- [X] [#3442](https://github.com/kubernetes/ingress-nginx/pull/3442) Increase log level when there is an invalid size value +- [X] [#3453](https://github.com/kubernetes/ingress-nginx/pull/3453) Monitor fixes + +_Documentation:_ + +- [X] [#3166](https://github.com/kubernetes/ingress-nginx/pull/3166) Added ingress tls values.yaml example to documentation +- [X] [#3215](https://github.com/kubernetes/ingress-nginx/pull/3215) align opentracing user-guide with nginx configmap configuration +- [X] [#3229](https://github.com/kubernetes/ingress-nginx/pull/3229) Fix documentation links [skip ci] +- [X] [#3232](https://github.com/kubernetes/ingress-nginx/pull/3232) Fix typo +- [X] [#3242](https://github.com/kubernetes/ingress-nginx/pull/3242) Add a note to the deployment into GKE +- [X] [#3249](https://github.com/kubernetes/ingress-nginx/pull/3249) Clarify mandatory script doc +- [X] [#3262](https://github.com/kubernetes/ingress-nginx/pull/3262) Add e2e test for connection +- [X] [#3263](https://github.com/kubernetes/ingress-nginx/pull/3263) "diretly" typo +- [X] [#3264](https://github.com/kubernetes/ingress-nginx/pull/3264) Add missing annotations to Docs +- [X] [#3271](https://github.com/kubernetes/ingress-nginx/pull/3271) the sample ingress spec error +- [X] [#3275](https://github.com/kubernetes/ingress-nginx/pull/3275) Add Better Documentation for using AuthTLS +- [X] [#3282](https://github.com/kubernetes/ingress-nginx/pull/3282) Fix some typos +- [X] [#3312](https://github.com/kubernetes/ingress-nginx/pull/3312) Delete some extra words +- [X] [#3319](https://github.com/kubernetes/ingress-nginx/pull/3319) Fix links in deploy index docs +- [X] [#3326](https://github.com/kubernetes/ingress-nginx/pull/3326) fix broken link +- [X] [#3349](https://github.com/kubernetes/ingress-nginx/pull/3349) fix typo +- [X] [#3364](https://github.com/kubernetes/ingress-nginx/pull/3364) Fix links format [skip-ci] +- [X] [#3366](https://github.com/kubernetes/ingress-nginx/pull/3366) Fix some typos +- [X] [#3369](https://github.com/kubernetes/ingress-nginx/pull/3369) Fix some typos +- [X] [#3370](https://github.com/kubernetes/ingress-nginx/pull/3370) Fix typo: whitlelist -> whitelist +- [X] [#3377](https://github.com/kubernetes/ingress-nginx/pull/3377) Fix typos and default value +- [X] [#3379](https://github.com/kubernetes/ingress-nginx/pull/3379) Fix typos +- [X] [#3382](https://github.com/kubernetes/ingress-nginx/pull/3382) Fix typos: reqrite -> rewrite +- [X] [#3388](https://github.com/kubernetes/ingress-nginx/pull/3388) Update annotations.md. Remove Duplication. +- [X] [#3392](https://github.com/kubernetes/ingress-nginx/pull/3392) Fix link in documentation [skip ci] +- [X] [#3395](https://github.com/kubernetes/ingress-nginx/pull/3395) Fix some documents issues + +### 0.20.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0` + +_New Features:_ + +- NGINX 1.15.5 +- Support for *regular expressions* in paths https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/ingress-path-matching.md +- Provide possibility to block IPs, User-Agents and Referers globally +- Remove --default-backend-service requirement. 
Use the flag only for custom default backends +- Valgrind and Openresty gdb tools + +_Changes:_ + +- [X] [#2997](https://github.com/kubernetes/ingress-nginx/pull/2997) Provide possibility to block IPs, User-Agents and Referers globally +- [X] [#3016](https://github.com/kubernetes/ingress-nginx/pull/3016) Log Errors Missing in Internal +- [X] [#3017](https://github.com/kubernetes/ingress-nginx/pull/3017) Add e2e tests for CORS +- [X] [#3022](https://github.com/kubernetes/ingress-nginx/pull/3022) Add support for valgrind +- [X] [#3029](https://github.com/kubernetes/ingress-nginx/pull/3029) add support for http2-max-requests in configmap +- [X] [#3035](https://github.com/kubernetes/ingress-nginx/pull/3035) Fixup #2970: Add Missing Label `app.kubernetes.io/part-of: ingress-nginx` +- [X] [#3049](https://github.com/kubernetes/ingress-nginx/pull/3049) fix: Don't try and find local certs when secretName is not specified +- [X] [#3050](https://github.com/kubernetes/ingress-nginx/pull/3050) Add Ingress variable in Grafana dashboard +- [X] [#3062](https://github.com/kubernetes/ingress-nginx/pull/3062) Pass Host header for custom errors +- [X] [#3065](https://github.com/kubernetes/ingress-nginx/pull/3065) Join host/port with go helper (supports ipv6) +- [X] [#3067](https://github.com/kubernetes/ingress-nginx/pull/3067) fix missing datasource value +- [X] [#3069](https://github.com/kubernetes/ingress-nginx/pull/3069) Replace client-go deprecated method +- [X] [#3072](https://github.com/kubernetes/ingress-nginx/pull/3072) Update ingress service IP +- [X] [#3073](https://github.com/kubernetes/ingress-nginx/pull/3073) do not hardcode the path +- [X] [#3078](https://github.com/kubernetes/ingress-nginx/pull/3078) Fix Rewrite-Target Annotation Edge Case +- [X] [#3079](https://github.com/kubernetes/ingress-nginx/pull/3079) Openresty gdb tools +- [X] [#3080](https://github.com/kubernetes/ingress-nginx/pull/3080) Update nginx image to 0.62 +- [X] [#3098](https://github.com/kubernetes/ingress-nginx/pull/3098) make upstream keepalive work for http +- [X] [#3100](https://github.com/kubernetes/ingress-nginx/pull/3100) update annotation name from rewrite-log to enable-rewrite-log +- [X] [#3118](https://github.com/kubernetes/ingress-nginx/pull/3118) Replace standard json encoding with jsoniter +- [X] [#3121](https://github.com/kubernetes/ingress-nginx/pull/3121) Typo fix: adresses -> addresses +- [X] [#3126](https://github.com/kubernetes/ingress-nginx/pull/3126) do not require --default-backend-service +- [X] [#3130](https://github.com/kubernetes/ingress-nginx/pull/3130) fix newlines location denied +- [X] [#3133](https://github.com/kubernetes/ingress-nginx/pull/3133) multi-tls readme example to reference the file +- [X] [#3134](https://github.com/kubernetes/ingress-nginx/pull/3134) Update nginx to 1.15.4 +- [X] [#3135](https://github.com/kubernetes/ingress-nginx/pull/3135) Remove payload from post log +- [X] [#3136](https://github.com/kubernetes/ingress-nginx/pull/3136) Update nginx image +- [X] [#3137](https://github.com/kubernetes/ingress-nginx/pull/3137) Docker run as user +- [X] [#3143](https://github.com/kubernetes/ingress-nginx/pull/3143) Ensure monitoring for custom error pages +- [X] [#3144](https://github.com/kubernetes/ingress-nginx/pull/3144) Fix incorrect .DisableLua 
access.
+- [X] [#3145](https://github.com/kubernetes/ingress-nginx/pull/3145) Add "use-regex" Annotation to Toggle Regular Expression Location Modifier
+- [X] [#3146](https://github.com/kubernetes/ingress-nginx/pull/3146) Update default backend image
+- [X] [#3147](https://github.com/kubernetes/ingress-nginx/pull/3147) Fix error publishing docs [skip ci]
+- [X] [#3149](https://github.com/kubernetes/ingress-nginx/pull/3149) Add e2e Tests for Proxy Annotations
+- [X] [#3151](https://github.com/kubernetes/ingress-nginx/pull/3151) Add e2e test for SSL-Ciphers
+- [X] [#3159](https://github.com/kubernetes/ingress-nginx/pull/3159) Pass --shell to minikube docker-env
+- [X] [#3178](https://github.com/kubernetes/ingress-nginx/pull/3178) Update nginx to 1.15.5
+- [X] [#3179](https://github.com/kubernetes/ingress-nginx/pull/3179) Update nginx image
+- [X] [#3182](https://github.com/kubernetes/ingress-nginx/pull/3182) Allow curly braces to be used in regex paths
+
+_Documentation:_
+
+- [X] [#3021](https://github.com/kubernetes/ingress-nginx/pull/3021) Fix documentation search
+- [X] [#3027](https://github.com/kubernetes/ingress-nginx/pull/3027) Add documentation about running Ingress NGINX on bare-metal
+- [X] [#3039](https://github.com/kubernetes/ingress-nginx/pull/3039) Remove link to invalid example [ci-skip]
+- [X] [#3046](https://github.com/kubernetes/ingress-nginx/pull/3046) Document when to modify ELB idle timeouts and set default value to 60s
+- [X] [#3059](https://github.com/kubernetes/ingress-nginx/pull/3059) fix some typos
+- [X] [#3068](https://github.com/kubernetes/ingress-nginx/pull/3068) Complete documentation about SSL Passthrough
+- [X] [#3074](https://github.com/kubernetes/ingress-nginx/pull/3074) Add MetalLB to bare-metal deployment page
+- [X] [#3090](https://github.com/kubernetes/ingress-nginx/pull/3090) Add note about default namespace and merge behavior
+- [X] [#3092](https://github.com/kubernetes/ingress-nginx/pull/3092) Update mkdocs and travis-ci
+- [X] [#3094](https://github.com/kubernetes/ingress-nginx/pull/3094) Fix baremetal images [skip ci]
+- [X] [#3097](https://github.com/kubernetes/ingress-nginx/pull/3097) Added notes regarding external access when using TCP/UDP proxy in Ingress
+- [X] [#3102](https://github.com/kubernetes/ingress-nginx/pull/3102) Replace kubernetes-users mailing list links with discuss forum link
+- [X] [#3111](https://github.com/kubernetes/ingress-nginx/pull/3111) doc issue related to monitor part
+- [X] [#3113](https://github.com/kubernetes/ingress-nginx/pull/3113) fix typos
+- [X] [#3115](https://github.com/kubernetes/ingress-nginx/pull/3115) Fixed link to aws elastic loadbalancer
+- [X] [#3162](https://github.com/kubernetes/ingress-nginx/pull/3162) update name of config map in README.md
+- [X] [#3175](https://github.com/kubernetes/ingress-nginx/pull/3175) Fix yaml indentation in annotations server-snippet doc
+
+### 0.19.0
+
+**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.19.0`
+
+_New Features:_
+
+- NGINX 1.15.3
+- Serve SSL certificates dynamically instead of reloading NGINX when they are created, updated, or deleted.
+  The feature is behind the flag `--enable-dynamic-certificates`
+- GDB binary is included in the image to help [troubleshooting issues](https://github.com/kubernetes/ingress-nginx/pull/3002)
+- Adjust the number of CPUs when CGROUP limits are defined (`worker-processes=auto` uses all the available ones)
+
+_Changes:_
+
+- [x] [#2616](https://github.com/kubernetes/ingress-nginx/pull/2616) Add use-forwarded-headers configmap option.
+- [x] [#2857](https://github.com/kubernetes/ingress-nginx/pull/2857) remove unnecessary encoding/decoding also fix ipv6 issue
+- [x] [#2884](https://github.com/kubernetes/ingress-nginx/pull/2884) [grafana] Rate over 2 minutes since default Prometheus interval is 1m
+- [x] [#2889](https://github.com/kubernetes/ingress-nginx/pull/2889) Add Lua endpoint to support dynamic certificate serving functionality
+- [x] [#2899](https://github.com/kubernetes/ingress-nginx/pull/2899) fixed rewrites for paths not ending in /
+- [x] [#2923](https://github.com/kubernetes/ingress-nginx/pull/2923) Add dynamic certificate serving feature to controller
+- [x] [#2925](https://github.com/kubernetes/ingress-nginx/pull/2925) Update nginx dependencies
+- [x] [#2932](https://github.com/kubernetes/ingress-nginx/pull/2932) Fixed typo in flags.go
+- [x] [#2934](https://github.com/kubernetes/ingress-nginx/pull/2934) Datasource input variable
+- [x] [#2941](https://github.com/kubernetes/ingress-nginx/pull/2941) now actually using the $controller and $namespace variables
+- [x] [#2942](https://github.com/kubernetes/ingress-nginx/pull/2942) Update nginx image
+- [x] [#2946](https://github.com/kubernetes/ingress-nginx/pull/2946) Add unit tests to configuration_test.lua that cover Backends configuration
+- [x] [#2955](https://github.com/kubernetes/ingress-nginx/pull/2955) Update nginx opentracing zipkin module
+- [x] [#2956](https://github.com/kubernetes/ingress-nginx/pull/2956) Update nginx and e2e images
+- [x] [#2957](https://github.com/kubernetes/ingress-nginx/pull/2957) Batch metrics and flush periodically
+- [x] [#2964](https://github.com/kubernetes/ingress-nginx/pull/2964) fix variable parsing when key is number
+- [x] [#2965](https://github.com/kubernetes/ingress-nginx/pull/2965) Add Lua module to serve SSL Certificates dynamically
+- [x] [#2966](https://github.com/kubernetes/ingress-nginx/pull/2966) Add unit tests for sticky lua module
+- [x] [#2970](https://github.com/kubernetes/ingress-nginx/pull/2970) Update labels
+- [x] [#2972](https://github.com/kubernetes/ingress-nginx/pull/2972) consistently fallback to default certificate when TLS is configured
+- [x] [#2977](https://github.com/kubernetes/ingress-nginx/pull/2977) Pass real source IP address to auth request
+- [x] [#2979](https://github.com/kubernetes/ingress-nginx/pull/2979) clear dynamic configuration e2e tests
+- [x] [#2987](https://github.com/kubernetes/ingress-nginx/pull/2987) cleanup dynamic cert e2e tests
+- [x] [#2988](https://github.com/kubernetes/ingress-nginx/pull/2988) Update go to 1.11
+- [x] [#2990](https://github.com/kubernetes/ingress-nginx/pull/2990) Check if cgroup cpu limits are defined to get the number of CPUs
+- [x] [#3003](https://github.com/kubernetes/ingress-nginx/pull/3003) Update nginx to 1.15.3
+- [x] [#3004](https://github.com/kubernetes/ingress-nginx/pull/3004) Update nginx image
+- [x] 
[#3005](https://github.com/kubernetes/ingress-nginx/pull/3005) Fix gdb issue and update e2e image +- [x] [#3006](https://github.com/kubernetes/ingress-nginx/pull/3006) apply nginx patch to make ssl_certificate_by_lua_block work properly +- [x] [#3011](https://github.com/kubernetes/ingress-nginx/pull/3011) Update nginx image + +_Documentation:_ + +- [x] [#2806](https://github.com/kubernetes/ingress-nginx/pull/2806) add help for tls prerequisite for ingress.yaml +- [x] [#2912](https://github.com/kubernetes/ingress-nginx/pull/2912) Add documentation to install prometheus and grafana +- [x] [#2928](https://github.com/kubernetes/ingress-nginx/pull/2928) docs: Precisations on the usage of the InfluxDB module +- [x] [#2962](https://github.com/kubernetes/ingress-nginx/pull/2962) Fix broken anchor link to GCE/GKE +- [x] [#2983](https://github.com/kubernetes/ingress-nginx/pull/2983) Add documentation for enable-dynamic-certificates feature +- [x] [#2998](https://github.com/kubernetes/ingress-nginx/pull/2998) fixed jsonpath command in examples +- [x] [#3002](https://github.com/kubernetes/ingress-nginx/pull/3002) Enhance Troubleshooting Documentation + +### 0.18.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.18.0` + +_New Features:_ + +- NGINX 1.15.2 +- Dynamic configuration is enabled by default +- Support for AJP protocol +- Use of authbind to bind privileged ports +- Replace minikube with [kubeadm-dind-cluster](https://github.com/kubernetes-sigs/kubeadm-dind-cluster) to run e2e tests + +_Changes:_ + +- [x] [#2789](https://github.com/kubernetes/ingress-nginx/pull/2789) Remove KubeConfig Dependency for Store Tests +- [x] [#2794](https://github.com/kubernetes/ingress-nginx/pull/2794) enable dynamic backend configuration by default +- [x] [#2795](https://github.com/kubernetes/ingress-nginx/pull/2795) start minikube before trying to build the image +- [x] [#2804](https://github.com/kubernetes/ingress-nginx/pull/2804) add support for ExternalName service type in dynamic mode +- [x] [#2808](https://github.com/kubernetes/ingress-nginx/pull/2808) fix the bug #2799, add prefix (?i) in rewrite statement. 
+- [x] [#2811](https://github.com/kubernetes/ingress-nginx/pull/2811) Escape $request_uri for external auth +- [x] [#2812](https://github.com/kubernetes/ingress-nginx/pull/2812) modified annotation name "rewrite-to" to "rewrite-target" in comments +- [x] [#2819](https://github.com/kubernetes/ingress-nginx/pull/2819) Catch errors waiting for controller deployment +- [x] [#2823](https://github.com/kubernetes/ingress-nginx/pull/2823) Multiple optimizations to build targets +- [x] [#2825](https://github.com/kubernetes/ingress-nginx/pull/2825) Refactoring of how we run as user +- [x] [#2826](https://github.com/kubernetes/ingress-nginx/pull/2826) Remove setcap from image and update nginx to 0.15.1 +- [x] [#2827](https://github.com/kubernetes/ingress-nginx/pull/2827) Use nginx image as base and install go on top +- [x] [#2829](https://github.com/kubernetes/ingress-nginx/pull/2829) use resty-cli for running lua unit tests +- [x] [#2830](https://github.com/kubernetes/ingress-nginx/pull/2830) Remove lua mocks +- [x] [#2834](https://github.com/kubernetes/ingress-nginx/pull/2834) Added permanent-redirect-code +- [x] [#2844](https://github.com/kubernetes/ingress-nginx/pull/2844) Do not allow invalid latency values in metrics +- [x] [#2852](https://github.com/kubernetes/ingress-nginx/pull/2852) fix custom-error-pages functionality in dynamic mode +- [x] [#2853](https://github.com/kubernetes/ingress-nginx/pull/2853) improve annotations/default_backend e2e test +- [x] [#2858](https://github.com/kubernetes/ingress-nginx/pull/2858) Update build image +- [x] [#2859](https://github.com/kubernetes/ingress-nginx/pull/2859) Fix inconsistent metric labels +- [x] [#2863](https://github.com/kubernetes/ingress-nginx/pull/2863) Replace minikube for e2e tests +- [x] [#2867](https://github.com/kubernetes/ingress-nginx/pull/2867) fix bug with lua e2e test suite +- [x] [#2868](https://github.com/kubernetes/ingress-nginx/pull/2868) Use an existing e2e image +- [x] [#2869](https://github.com/kubernetes/ingress-nginx/pull/2869) describe under what circumstances and how we avoid Nginx reload +- [x] [#2871](https://github.com/kubernetes/ingress-nginx/pull/2871) Add support for AJP protocol +- [x] [#2872](https://github.com/kubernetes/ingress-nginx/pull/2872) Update nginx to 1.15.2 +- [x] [#2874](https://github.com/kubernetes/ingress-nginx/pull/2874) Delay initial prometheus status metric +- [x] [#2876](https://github.com/kubernetes/ingress-nginx/pull/2876) Remove dashboard an tune sync-frequency +- [x] [#2877](https://github.com/kubernetes/ingress-nginx/pull/2877) Refactor entrypoint to avoid issues with volumes +- [x] [#2885](https://github.com/kubernetes/ingress-nginx/pull/2885) fix: Sort TCP/UDP upstream order +- [x] [#2888](https://github.com/kubernetes/ingress-nginx/pull/2888) Fix grafana datasources +- [x] [#2890](https://github.com/kubernetes/ingress-nginx/pull/2890) Usability improvements to build steps +- [x] [#2893](https://github.com/kubernetes/ingress-nginx/pull/2893) Update nginx image +- [x] [#2894](https://github.com/kubernetes/ingress-nginx/pull/2894) Use authbind to bind privileged ports +- [x] [#2895](https://github.com/kubernetes/ingress-nginx/pull/2895) support custom configuration to main context of nginx config +- [x] 
[#2896](https://github.com/kubernetes/ingress-nginx/pull/2896) support configuring multi_accept directive via configmap +- [x] [#2897](https://github.com/kubernetes/ingress-nginx/pull/2897) Enable reuse-port by default +- [x] [#2905](https://github.com/kubernetes/ingress-nginx/pull/2905) Fix IPV6 detection + +_Documentation:_ + +- [x] [#2816](https://github.com/kubernetes/ingress-nginx/pull/2816) doc log-format: add variables about ingress +- [x] [#2866](https://github.com/kubernetes/ingress-nginx/pull/2866) Update index.md +- [x] [#2898](https://github.com/kubernetes/ingress-nginx/pull/2898) Fix default sync-period doc +- [x] [#2903](https://github.com/kubernetes/ingress-nginx/pull/2903) Very minor grammar fix + +### 0.17.1 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.1` + +_Changes:_ + +- [x] [#2782](https://github.com/kubernetes/ingress-nginx/pull/2782) Add Better Error Handling for SSLSessionTicketKey +- [x] [#2790](https://github.com/kubernetes/ingress-nginx/pull/2790) Update prometheus labels + +_Documentation:_ + +- [x] [#2770](https://github.com/kubernetes/ingress-nginx/pull/2770) Basic-Auth doc misleading: fix double quotes leading to nginx config error + +### 0.17.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.0` + +_New Features:_ + +- [Grafana dashboards](https://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards) + +_Changes:_ + +- [x] [#2705](https://github.com/kubernetes/ingress-nginx/pull/2705) Remove duplicated securityContext +- [x] [#2719](https://github.com/kubernetes/ingress-nginx/pull/2719) Sample rate configmap option for zipkin in nginx-opentracing +- [x] [#2726](https://github.com/kubernetes/ingress-nginx/pull/2726) Cleanup prometheus metrics after a reload +- [x] [#2727](https://github.com/kubernetes/ingress-nginx/pull/2727) Add e2e tests for Client-Body-Buffer-Size +- [x] [#2732](https://github.com/kubernetes/ingress-nginx/pull/2732) Improve logging +- [x] [#2741](https://github.com/kubernetes/ingress-nginx/pull/2741) Add redirect uri for oauth2 login +- [x] [#2744](https://github.com/kubernetes/ingress-nginx/pull/2744) fix: Use the correct opentracing plugin for Jaeger +- [x] [#2747](https://github.com/kubernetes/ingress-nginx/pull/2747) Update opentracing-cpp and modsecurity +- [x] [#2748](https://github.com/kubernetes/ingress-nginx/pull/2748) Update nginx image to 0.54 +- [x] [#2749](https://github.com/kubernetes/ingress-nginx/pull/2749) Use docker to build go binaries +- [x] [#2754](https://github.com/kubernetes/ingress-nginx/pull/2754) Allow gzip compression level to be controlled via ConfigMap +- [x] [#2760](https://github.com/kubernetes/ingress-nginx/pull/2760) Fix ingress rule parsing error +- [x] [#2767](https://github.com/kubernetes/ingress-nginx/pull/2767) Fix regression introduced in #2732 +- [x] [#2771](https://github.com/kubernetes/ingress-nginx/pull/2771) Grafana Dashboard +- [x] [#2775](https://github.com/kubernetes/ingress-nginx/pull/2775) Simplify handler registration and updates prometheus +- [x] [#2776](https://github.com/kubernetes/ingress-nginx/pull/2776) Fix configuration hash calculation + +_Documentation:_ + +- [x] [#2717](https://github.com/kubernetes/ingress-nginx/pull/2717) GCE/GKE proxy mentioned for Azure +- [x] 
[#2743](https://github.com/kubernetes/ingress-nginx/pull/2743) Clarify Installation Document by Separating Helm Steps +- [x] [#2761](https://github.com/kubernetes/ingress-nginx/pull/2761) Fix spelling mistake +- [x] [#2764](https://github.com/kubernetes/ingress-nginx/pull/2764) Use language neutral links to MDN +- [x] [#2765](https://github.com/kubernetes/ingress-nginx/pull/2765) Add FOSSA status badge +- [x] [#2777](https://github.com/kubernetes/ingress-nginx/pull/2777) Build docs using local docker image [ci skip] + +### 0.16.2 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.2` + +_Breaking changes:_ + +Running as user requires an update in the deployment manifest. + +```yaml +securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 +``` + +Note: the deploy [guide](https://kubernetes.github.io/ingress-nginx/deploy/#mandatory-command) contains this change + +_Changes:_ + +- [x] [#2678](https://github.com/kubernetes/ingress-nginx/pull/2678) Refactor server type to include SSLCert +- [x] [#2685](https://github.com/kubernetes/ingress-nginx/pull/2685) Fix qemu docker build +- [x] [#2696](https://github.com/kubernetes/ingress-nginx/pull/2696) If server_tokens is disabled completely remove the Server header +- [x] [#2698](https://github.com/kubernetes/ingress-nginx/pull/2698) Improve best-cert guessing with empty tls.hosts +- [x] [#2701](https://github.com/kubernetes/ingress-nginx/pull/2701) Remove prometheus labels with high cardinality + +_Documentation:_ + +- [x] [#2368](https://github.com/kubernetes/ingress-nginx/pull/2368) [aggregate] Fix typos across codebase +- [x] [#2681](https://github.com/kubernetes/ingress-nginx/pull/2681) Typo fix in error message: encounted->encountered +- [x] [#2697](https://github.com/kubernetes/ingress-nginx/pull/2697) Enhance Distributed Tracing Documentation + +### 0.16.1 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1` + +_Breaking changes:_ + +Running as user requires an update in the deployment manifest. + +```yaml +securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 +``` + +Note: the deploy [guide](https://kubernetes.github.io/ingress-nginx/deploy/#mandatory-command) contains this change + +_New Features:_ + +- Run as user dropping root privileges +- New prometheus metric implementation (VTS module was removed) +- [InfluxDB integration](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#influxdb) +- [Module GeoIP2](https://github.com/leev/ngx_http_geoip2_module) + +_Changes:_ + +- [x] [#2692](https://github.com/kubernetes/ingress-nginx/pull/2692) Fix initial read of configuration configmap +- [x] [#2693](https://github.com/kubernetes/ingress-nginx/pull/2693) Revert #2669 +- [x] [#2694](https://github.com/kubernetes/ingress-nginx/pull/2694) Add note about status update + +### 0.16.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1` + +_Breaking changes:_ + +Running as user requires an update in the deployment manifest. 
+ +```yaml +securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 +``` + +Note: the deploy [guide](https://kubernetes.github.io/ingress-nginx/deploy/#mandatory-command) contains this change + +_New Features:_ + +- Run as user dropping root privileges +- New prometheus metric implementation (VTS module was removed) +- [InfluxDB integration](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#influxdb) +- [Module GeoIP2](https://github.com/leev/ngx_http_geoip2_module) + +_Changes:_ + +- [x] [#2423](https://github.com/kubernetes/ingress-nginx/pull/2423) Resolves issue with proxy-redirect nginx configuration +- [x] [#2451](https://github.com/kubernetes/ingress-nginx/pull/2451) fix for #1930, make sessions sticky, for ingress with multiple rules … +- [x] [#2484](https://github.com/kubernetes/ingress-nginx/pull/2484) Fix bugs in Lua implementation of sticky sessions +- [x] [#2486](https://github.com/kubernetes/ingress-nginx/pull/2486) Extend kubernetes interrelation variables in nginx.tmpl +- [x] [#2504](https://github.com/kubernetes/ingress-nginx/pull/2504) Add Timeout For TLS Passthrough +- [x] [#2505](https://github.com/kubernetes/ingress-nginx/pull/2505) Annotations for the InfluxDB module +- [x] [#2517](https://github.com/kubernetes/ingress-nginx/pull/2517) Fix typo about the kind of request +- [x] [#2523](https://github.com/kubernetes/ingress-nginx/pull/2523) Add tests for bind-address +- [x] [#2524](https://github.com/kubernetes/ingress-nginx/pull/2524) Add support for grpc_set_header +- [x] [#2526](https://github.com/kubernetes/ingress-nginx/pull/2526) Fix upstream hash lua test +- [x] [#2528](https://github.com/kubernetes/ingress-nginx/pull/2528) Remove go-bindata +- [x] [#2533](https://github.com/kubernetes/ingress-nginx/pull/2533) NGINX image update: add the influxdb module +- [x] [#2534](https://github.com/kubernetes/ingress-nginx/pull/2534) Set Focus for E2E Tests +- [x] [#2537](https://github.com/kubernetes/ingress-nginx/pull/2537) Update nginx modules +- [x] [#2542](https://github.com/kubernetes/ingress-nginx/pull/2542) Instrument controller to show configReload metrics +- [x] [#2543](https://github.com/kubernetes/ingress-nginx/pull/2543) introduce a balancer interface +- [x] [#2548](https://github.com/kubernetes/ingress-nginx/pull/2548) Implement generate-request-id +- [x] [#2554](https://github.com/kubernetes/ingress-nginx/pull/2554) use better defaults for proxy-next-upstream(-tries) +- [x] [#2558](https://github.com/kubernetes/ingress-nginx/pull/2558) Update qemu to 2.12.0 [ci skip] +- [x] [#2559](https://github.com/kubernetes/ingress-nginx/pull/2559) Add geoip2 module and DB to nginx build +- [x] [#2564](https://github.com/kubernetes/ingress-nginx/pull/2564) Add security contacts file [ci skip] +- [x] [#2569](https://github.com/kubernetes/ingress-nginx/pull/2569) Update nginx modules to fix core dump [ci skip] +- [x] [#2570](https://github.com/kubernetes/ingress-nginx/pull/2570) Enable core dumps during tests +- [x] [#2573](https://github.com/kubernetes/ingress-nginx/pull/2573) Refactor e2e tests and update go dependencies +- [x] [#2574](https://github.com/kubernetes/ingress-nginx/pull/2574) Fix default-backend annotation +- [x] 
[#2575](https://github.com/kubernetes/ingress-nginx/pull/2575) Print information about NGINX version +- [x] [#2577](https://github.com/kubernetes/ingress-nginx/pull/2577) make sure ingress-nginx instances are watching their namespace only during test runs +- [x] [#2588](https://github.com/kubernetes/ingress-nginx/pull/2588) Update nginx dependencies +- [x] [#2590](https://github.com/kubernetes/ingress-nginx/pull/2590) Typo fix: muthual autentication -> mutual authentication +- [x] [#2591](https://github.com/kubernetes/ingress-nginx/pull/2591) Access log improvements +- [x] [#2597](https://github.com/kubernetes/ingress-nginx/pull/2597) Fix arm paths for liblua.so and lua_package_cpath +- [x] [#2598](https://github.com/kubernetes/ingress-nginx/pull/2598) Always sort upstream list to provide stable iteration order +- [x] [#2600](https://github.com/kubernetes/ingress-nginx/pull/2600) typo fix futher to further && preformance to performance +- [x] [#2602](https://github.com/kubernetes/ingress-nginx/pull/2602) Crossplat fixes +- [x] [#2603](https://github.com/kubernetes/ingress-nginx/pull/2603) Bump nginx influxdb module to f8732268d44aea706ecf8d9c6036e9b6dacc99b2 +- [x] [#2608](https://github.com/kubernetes/ingress-nginx/pull/2608) Expose UDP message on /metrics endpoint +- [x] [#2611](https://github.com/kubernetes/ingress-nginx/pull/2611) Add metric emitter lua module +- [x] [#2614](https://github.com/kubernetes/ingress-nginx/pull/2614) fix nginx conf test error when not found active service endpoints +- [x] [#2617](https://github.com/kubernetes/ingress-nginx/pull/2617) Update go to 1.10.3 +- [x] [#2618](https://github.com/kubernetes/ingress-nginx/pull/2618) Update nginx to 1.15.0 and remove VTS module +- [x] [#2619](https://github.com/kubernetes/ingress-nginx/pull/2619) Run as user dropping privileges +- [x] [#2623](https://github.com/kubernetes/ingress-nginx/pull/2623) Proofread cmd package and update flags description +- [x] [#2634](https://github.com/kubernetes/ingress-nginx/pull/2634) Disable resync period +- [x] [#2636](https://github.com/kubernetes/ingress-nginx/pull/2636) Add missing equality comparisons for ingress.Server +- [x] [#2638](https://github.com/kubernetes/ingress-nginx/pull/2638) Wait the result of the controller deployment before running any test +- [x] [#2639](https://github.com/kubernetes/ingress-nginx/pull/2639) Clarify log messages in controller package +- [x] [#2643](https://github.com/kubernetes/ingress-nginx/pull/2643) Remove VTS from the ingress controller +- [x] [#2644](https://github.com/kubernetes/ingress-nginx/pull/2644) Update nginx image version +- [x] [#2646](https://github.com/kubernetes/ingress-nginx/pull/2646) Rollback nginx 1.15.0 to 1.13.12 +- [x] [#2649](https://github.com/kubernetes/ingress-nginx/pull/2649) Add support for IPV6 in stream upstream servers +- [x] [#2652](https://github.com/kubernetes/ingress-nginx/pull/2652) Use a unix socket instead udp for reception of metrics +- [x] [#2653](https://github.com/kubernetes/ingress-nginx/pull/2653) Remove dummy file watcher +- [x] [#2654](https://github.com/kubernetes/ingress-nginx/pull/2654) Hotfix: influxdb module enable disable toggle +- [x] [#2656](https://github.com/kubernetes/ingress-nginx/pull/2656) Improve configuration change detection +- [x] 
[#2658](https://github.com/kubernetes/ingress-nginx/pull/2658) Do not wait informer initialization to read configuration +- [x] [#2659](https://github.com/kubernetes/ingress-nginx/pull/2659) Update nginx image +- [x] [#2660](https://github.com/kubernetes/ingress-nginx/pull/2660) Change modsecurity directories +- [x] [#2661](https://github.com/kubernetes/ingress-nginx/pull/2661) Add additional header when debug is enabled +- [x] [#2664](https://github.com/kubernetes/ingress-nginx/pull/2664) refactor some lua code +- [x] [#2669](https://github.com/kubernetes/ingress-nginx/pull/2669) Remove unnecessary sync when the leader change +- [x] [#2672](https://github.com/kubernetes/ingress-nginx/pull/2672) After a configmap change parse ingress annotations (again) +- [x] [#2673](https://github.com/kubernetes/ingress-nginx/pull/2673) Add new approvers to the project +- [x] [#2674](https://github.com/kubernetes/ingress-nginx/pull/2674) Add e2e test for configmap change and reload +- [x] [#2675](https://github.com/kubernetes/ingress-nginx/pull/2675) Update opentracing nginx module +- [x] [#2676](https://github.com/kubernetes/ingress-nginx/pull/2676) Update opentracing configuration + +_Documentation:_ + +- [x] [#2479](https://github.com/kubernetes/ingress-nginx/pull/2479) Document how the NGINX Ingress controller build nginx.conf +- [x] [#2515](https://github.com/kubernetes/ingress-nginx/pull/2515) Simplify installation and e2e manifests +- [x] [#2531](https://github.com/kubernetes/ingress-nginx/pull/2531) Mention the #ingress-nginx Slack channel +- [x] [#2540](https://github.com/kubernetes/ingress-nginx/pull/2540) DOCS: Correct ssl-passthrough annotation description. 
+- [x] [#2544](https://github.com/kubernetes/ingress-nginx/pull/2544) [docs] Fix manifest URL for GKE + Azure +- [x] [#2566](https://github.com/kubernetes/ingress-nginx/pull/2566) Fix wrong default value for `enable-brotli` +- [x] [#2581](https://github.com/kubernetes/ingress-nginx/pull/2581) Improved link in modsecurity.md +- [x] [#2583](https://github.com/kubernetes/ingress-nginx/pull/2583) docs: add secret scheme details to the example +- [x] [#2592](https://github.com/kubernetes/ingress-nginx/pull/2592) Typo fix: are be->are/to on->to +- [x] [#2595](https://github.com/kubernetes/ingress-nginx/pull/2595) Typo fix: successfull->successful +- [x] [#2601](https://github.com/kubernetes/ingress-nginx/pull/2601) fix changelog link in README.md +- [x] [#2624](https://github.com/kubernetes/ingress-nginx/pull/2624) Fix minor documentation example +- [x] [#2625](https://github.com/kubernetes/ingress-nginx/pull/2625) Add annotation doc on proxy buffer size +- [x] [#2630](https://github.com/kubernetes/ingress-nginx/pull/2630) Update documentation for custom error pages +- [x] [#2666](https://github.com/kubernetes/ingress-nginx/pull/2666) Add documentation for proxy-cookie-domain annotation (#2034) + +### 0.15.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0` + +_Changes:_ + +- [x] [#2440](https://github.com/kubernetes/ingress-nginx/pull/2440) TLS tests +- [x] [#2443](https://github.com/kubernetes/ingress-nginx/pull/2443) improve build-dev-env.sh script +- [x] [#2446](https://github.com/kubernetes/ingress-nginx/pull/2446) always use x-request-id +- [x] [#2447](https://github.com/kubernetes/ingress-nginx/pull/2447) Add basic security context to deployment YAMLs +- [x] [#2453](https://github.com/kubernetes/ingress-nginx/pull/2453) Add google analytics [ci skip] +- [x] [#2456](https://github.com/kubernetes/ingress-nginx/pull/2456) Assert or install go-bindata before incanting +- [x] [#2472](https://github.com/kubernetes/ingress-nginx/pull/2472) Refactor Lua balancer +- [x] [#2477](https://github.com/kubernetes/ingress-nginx/pull/2477) Change TrimLeft for TrimPrefix on the from-to-www redirect +- [x] [#2490](https://github.com/kubernetes/ingress-nginx/pull/2490) add resty cookie +- [x] [#2495](https://github.com/kubernetes/ingress-nginx/pull/2495) [ci skip] bump nginx baseimage version +- [x] [#2501](https://github.com/kubernetes/ingress-nginx/pull/2501) Refactor update of status removing initial check for loadbalancer +- [x] [#2502](https://github.com/kubernetes/ingress-nginx/pull/2502) Update go version in fortune teller image +- [x] [#2511](https://github.com/kubernetes/ingress-nginx/pull/2511) force backend sync when worker starts +- [x] [#2512](https://github.com/kubernetes/ingress-nginx/pull/2512) Remove warning when secret is used only for authentication +- [x] [#2514](https://github.com/kubernetes/ingress-nginx/pull/2514) Fix and simplify local dev workflow and execution of e2e tests + +_Documentation:_ + +- [x] [#2448](https://github.com/kubernetes/ingress-nginx/pull/2448) Update GitHub pull request template +- [x] [#2449](https://github.com/kubernetes/ingress-nginx/pull/2449) Improve documentation format +- [x] [#2454](https://github.com/kubernetes/ingress-nginx/pull/2454) Add gRPC annotation doc +- [x] 
[#2455](https://github.com/kubernetes/ingress-nginx/pull/2455) Adjust size of tables and only adjust the first column on mobile +- [x] [#2457](https://github.com/kubernetes/ingress-nginx/pull/2457) Add Getting the Code section to Quick Start +- [x] [#2464](https://github.com/kubernetes/ingress-nginx/pull/2464) Documentation fixes & improvements +- [x] [#2467](https://github.com/kubernetes/ingress-nginx/pull/2467) Fixed broken link in deploy README +- [x] [#2498](https://github.com/kubernetes/ingress-nginx/pull/2498) Add some clarification around multiple ingress controller behavior +- [x] [#2503](https://github.com/kubernetes/ingress-nginx/pull/2503) Add KubeCon Europe 2018 Video to documentation + +### 0.14.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.14.0` + +_New Features:_ + +- [Documentation web page](https://kubernetes.github.io/ingress-nginx/) +- Support for `upstream-hash-by` annotation in dynamic configuration mode +- Improved e2e test suite + +_Changes:_ + +- [x] [#2346](https://github.com/kubernetes/ingress-nginx/pull/2346) Move ConfigMap updating methods into e2e/framework +- [x] [#2347](https://github.com/kubernetes/ingress-nginx/pull/2347) Update owners +- [x] [#2348](https://github.com/kubernetes/ingress-nginx/pull/2348) Use same convention, curl + kubectl for GKE +- [x] [#2350](https://github.com/kubernetes/ingress-nginx/pull/2350) Correct some returned messages in server_tokens.go +- [x] [#2352](https://github.com/kubernetes/ingress-nginx/pull/2352) Correct some info in flags.go +- [x] [#2353](https://github.com/kubernetes/ingress-nginx/pull/2353) Add proxy-add-original-uri-header config flag +- [x] [#2356](https://github.com/kubernetes/ingress-nginx/pull/2356) Add vts-sum-key config flag +- [x] [#2361](https://github.com/kubernetes/ingress-nginx/pull/2361) Check ingress rule contains HTTP paths +- [x] [#2363](https://github.com/kubernetes/ingress-nginx/pull/2363) Review $request_id +- [x] [#2365](https://github.com/kubernetes/ingress-nginx/pull/2365) Clean JSON before post request to update configuration +- [x] [#2369](https://github.com/kubernetes/ingress-nginx/pull/2369) Update nginx image to fix modsecurity crs issues +- [x] [#2370](https://github.com/kubernetes/ingress-nginx/pull/2370) Update nginx image +- [x] [#2374](https://github.com/kubernetes/ingress-nginx/pull/2374) Remove most of the time.Sleep from the e2e tests +- [x] [#2379](https://github.com/kubernetes/ingress-nginx/pull/2379) Add busted unit testing framework for lua code +- [x] [#2382](https://github.com/kubernetes/ingress-nginx/pull/2382) Accept ns/name Secret reference in annotations +- [x] [#2383](https://github.com/kubernetes/ingress-nginx/pull/2383) Improve speed of e2e tests +- [x] [#2385](https://github.com/kubernetes/ingress-nginx/pull/2385) include lua-resty-balancer in nginx image +- [x] [#2386](https://github.com/kubernetes/ingress-nginx/pull/2386) upstream-hash-by annotation support for dynamic configuraton mode +- [x] [#2388](https://github.com/kubernetes/ingress-nginx/pull/2388) Silence unnecessary MissingAnnotations errors +- [x] [#2392](https://github.com/kubernetes/ingress-nginx/pull/2392) Ensure dep fix fsnotify +- [x] [#2395](https://github.com/kubernetes/ingress-nginx/pull/2395) Fix flaky test +- [x] 
[#2396](https://github.com/kubernetes/ingress-nginx/pull/2396) Update go dependencies +- [x] [#2398](https://github.com/kubernetes/ingress-nginx/pull/2398) Allow tls section without hosts in Ingress rule +- [x] [#2399](https://github.com/kubernetes/ingress-nginx/pull/2399) Add test for store helper ListIngresses +- [x] [#2401](https://github.com/kubernetes/ingress-nginx/pull/2401) Add tests for controller getEndpoints +- [x] [#2408](https://github.com/kubernetes/ingress-nginx/pull/2408) Read backends data even if buffered to temp file +- [x] [#2410](https://github.com/kubernetes/ingress-nginx/pull/2410) Add balancer unit tests +- [x] [#2411](https://github.com/kubernetes/ingress-nginx/pull/2411) Update nginx-opentracing to 0.3.0 +- [x] [#2414](https://github.com/kubernetes/ingress-nginx/pull/2414) Fix golint installation +- [x] [#2416](https://github.com/kubernetes/ingress-nginx/pull/2416) Update nginx image +- [x] [#2417](https://github.com/kubernetes/ingress-nginx/pull/2417) Automate building developer environment +- [x] [#2421](https://github.com/kubernetes/ingress-nginx/pull/2421) Apply gometalinter suggestions +- [x] [#2428](https://github.com/kubernetes/ingress-nginx/pull/2428) Add buffer configuration to external auth location config +- [x] [#2433](https://github.com/kubernetes/ingress-nginx/pull/2433) Remove data races from tests +- [x] [#2434](https://github.com/kubernetes/ingress-nginx/pull/2434) Check ginkgo is installed before running e2e tests +- [x] [#2437](https://github.com/kubernetes/ingress-nginx/pull/2437) Add annotation to enable rewrite logs in a location + +_Documentation:_ + +- [x] [#2351](https://github.com/kubernetes/ingress-nginx/pull/2351) Typo fix in cli-arguments.md +- [x] [#2372](https://github.com/kubernetes/ingress-nginx/pull/2372) fix the default cookie name in doc +- [x] [#2377](https://github.com/kubernetes/ingress-nginx/pull/2377) DOCS: Add clarification regarding ssl passthrough +- [x] [#2409](https://github.com/kubernetes/ingress-nginx/pull/2409) Add deployment instructions for Docker for Mac (Edge) +- [x] [#2413](https://github.com/kubernetes/ingress-nginx/pull/2413) Reorganize documentation +- [x] [#2438](https://github.com/kubernetes/ingress-nginx/pull/2438) Update custom-errors.md +- [x] [#2439](https://github.com/kubernetes/ingress-nginx/pull/2439) Update README.md +- [x] [#2430](https://github.com/kubernetes/ingress-nginx/pull/2430) Add scripts and tasks to publish docs to github pages +- [x] [#2431](https://github.com/kubernetes/ingress-nginx/pull/2431) Improve readme file +- [x] [#2366](https://github.com/kubernetes/ingress-nginx/pull/2366) fix: fill missing patch yaml config. +- [x] [#2432](https://github.com/kubernetes/ingress-nginx/pull/2432) Fix broken links in the docs +- [x] [#2436](https://github.com/kubernetes/ingress-nginx/pull/2436) Update exposing-tcp-udp-services.md + +### 0.13.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.13.0` + +_New Features:_ + +- NGINX 1.13.12 +- Support for gRPC (see the sketch below): + - The annotation `nginx.ingress.kubernetes.io/grpc-backend: "true"` enables this feature + - If the gRPC service requires TLS, also set `nginx.ingress.kubernetes.io/secure-backends: "true"` +- Configurable load balancing with EWMA +- Support for [lua-resty-waf](https://github.com/p0pr0ck5/lua-resty-waf) as an alternative to ModSecurity. [Check the configuration guide](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/annotations.md#lua-resty-waf) +- Support for session affinity when dynamic configuration is enabled. +- Add NoAuthLocations and default it to "/.well-known/acme-challenge"
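+
+A hypothetical Ingress using the new gRPC annotations (the host, service, and secret names are illustrative):
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: grpc-echo
+  annotations:
+    nginx.ingress.kubernetes.io/grpc-backend: "true"
+    # only needed when the backend service itself serves TLS
+    nginx.ingress.kubernetes.io/secure-backends: "true"
+spec:
+  tls:
+    - hosts:
+        - grpc.example.com
+      secretName: grpc-echo-tls
+  rules:
+    - host: grpc.example.com
+      http:
+        paths:
+          - backend:
+              serviceName: grpc-echo
+              servicePort: 50051
+```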
+ +_Changes:_ + +- [x] [#2078](https://github.com/kubernetes/ingress-nginx/pull/2078) Expose SSL client cert data to external auth provider. +- [x] [#2187](https://github.com/kubernetes/ingress-nginx/pull/2187) Managing a whitelist for \_/nginx_status +- [x] [#2208](https://github.com/kubernetes/ingress-nginx/pull/2208) Add missing lua bindata change +- [x] [#2209](https://github.com/kubernetes/ingress-nginx/pull/2209) fix go test TestSkipEnqueue error, move queue.Run +- [x] [#2210](https://github.com/kubernetes/ingress-nginx/pull/2210) allow ipv6 localhost when enabled +- [x] [#2212](https://github.com/kubernetes/ingress-nginx/pull/2212) Fix dynamic configuration when custom errors are enabled +- [x] [#2215](https://github.com/kubernetes/ingress-nginx/pull/2215) fix wrong config generation when upstream-hash-by is set +- [x] [#2220](https://github.com/kubernetes/ingress-nginx/pull/2220) fix: cannot set $service_name if use rewrite +- [x] [#2221](https://github.com/kubernetes/ingress-nginx/pull/2221) Update nginx to 1.13.10 and enable gRPC +- [x] [#2223](https://github.com/kubernetes/ingress-nginx/pull/2223) Add support for gRPC +- [x] [#2227](https://github.com/kubernetes/ingress-nginx/pull/2227) do not hardcode keepalive for upstream_balancer +- [x] [#2228](https://github.com/kubernetes/ingress-nginx/pull/2228) Fix broken links in multi-tls +- [x] [#2229](https://github.com/kubernetes/ingress-nginx/pull/2229) Configurable load balancing with EWMA +- [x] [#2232](https://github.com/kubernetes/ingress-nginx/pull/2232) Make proxy_next_upstream_tries configurable +- [x] [#2233](https://github.com/kubernetes/ingress-nginx/pull/2233) clean backends data before sending to Lua endpoint +- [x] [#2234](https://github.com/kubernetes/ingress-nginx/pull/2234) Update go dependencies +- [x] [#2235](https://github.com/kubernetes/ingress-nginx/pull/2235) add proxy header ssl-client-issuer-dn, fix #2178 +- [x] [#2241](https://github.com/kubernetes/ingress-nginx/pull/2241) Revert "Get file max from fs/file-max. 
(#2050)" +- [x] [#2243](https://github.com/kubernetes/ingress-nginx/pull/2243) Add NoAuthLocations and default it to "/.well-known/acme-challenge" +- [x] [#2244](https://github.com/kubernetes/ingress-nginx/pull/2244) fix: empty ingress path +- [x] [#2246](https://github.com/kubernetes/ingress-nginx/pull/2246) Fix grpc json tag name +- [x] [#2254](https://github.com/kubernetes/ingress-nginx/pull/2254) e2e tests for dynamic configuration and Lua features and a bug fix +- [x] [#2263](https://github.com/kubernetes/ingress-nginx/pull/2263) clean up tmpl +- [x] [#2270](https://github.com/kubernetes/ingress-nginx/pull/2270) Revert deleted code in #2146 +- [x] [#2271](https://github.com/kubernetes/ingress-nginx/pull/2271) Use SharedIndexInformers in place of Informers +- [x] [#2272](https://github.com/kubernetes/ingress-nginx/pull/2272) Disable opentracing for nginx internal urls +- [x] [#2273](https://github.com/kubernetes/ingress-nginx/pull/2273) Update go to 1.10.1 +- [x] [#2280](https://github.com/kubernetes/ingress-nginx/pull/2280) Fix bug when auth req is enabled(external authentication) +- [x] [#2283](https://github.com/kubernetes/ingress-nginx/pull/2283) Fix flaky e2e tests +- [x] [#2285](https://github.com/kubernetes/ingress-nginx/pull/2285) Update controller.go +- [x] [#2290](https://github.com/kubernetes/ingress-nginx/pull/2290) Update nginx to 1.13.11 +- [x] [#2294](https://github.com/kubernetes/ingress-nginx/pull/2294) Fix HSTS without preload +- [x] [#2296](https://github.com/kubernetes/ingress-nginx/pull/2296) Improve indentation of generated nginx.conf +- [x] [#2298](https://github.com/kubernetes/ingress-nginx/pull/2298) Disable dynamic configuration in s390x and ppc64le +- [x] [#2300](https://github.com/kubernetes/ingress-nginx/pull/2300) Fix race condition when Ingress does not contains a secret +- [x] [#2301](https://github.com/kubernetes/ingress-nginx/pull/2301) include lua-resty-waf and its dependencies in the base Nginx image +- [x] [#2303](https://github.com/kubernetes/ingress-nginx/pull/2303) More lua dependencies +- [x] [#2304](https://github.com/kubernetes/ingress-nginx/pull/2304) Lua resty waf controller +- [x] [#2305](https://github.com/kubernetes/ingress-nginx/pull/2305) Fix issues building nginx image in different platforms +- [x] [#2306](https://github.com/kubernetes/ingress-nginx/pull/2306) Disable lua waf where luajit is not available +- [x] [#2308](https://github.com/kubernetes/ingress-nginx/pull/2308) Add verification of lua load balancer to health check +- [x] [#2309](https://github.com/kubernetes/ingress-nginx/pull/2309) Configure upload limits for setup of lua load balancer +- [x] [#2314](https://github.com/kubernetes/ingress-nginx/pull/2314) annotation to ignore given list of WAF rulesets +- [x] [#2315](https://github.com/kubernetes/ingress-nginx/pull/2315) extra waf rules per ingress +- [x] [#2317](https://github.com/kubernetes/ingress-nginx/pull/2317) run lua-resty-waf in different modes +- [x] [#2327](https://github.com/kubernetes/ingress-nginx/pull/2327) Update nginx to 1.13.12 +- [x] [#2328](https://github.com/kubernetes/ingress-nginx/pull/2328) Update nginx image +- [x] [#2331](https://github.com/kubernetes/ingress-nginx/pull/2331) fix nil pointer when ssl with ca.crt +- [x] 
[#2333](https://github.com/kubernetes/ingress-nginx/pull/2333) disable lua for arch s390x and ppc64le +- [x] [#2340](https://github.com/kubernetes/ingress-nginx/pull/2340) Fix buildupstream name to work with dynamic session affinity +- [x] [#2341](https://github.com/kubernetes/ingress-nginx/pull/2341) Add session affinity to custom load balancing +- [x] [#2342](https://github.com/kubernetes/ingress-nginx/pull/2342) Sync SSL certificates on events + +_Documentation:_ + +- [x] [#2236](https://github.com/kubernetes/ingress-nginx/pull/2236) Add missing configuration in #2235 +- [x] [#1785](https://github.com/kubernetes/ingress-nginx/pull/1785) Add deployment docs for AWS NLB +- [x] [#2213](https://github.com/kubernetes/ingress-nginx/pull/2213) Update cli-arguments.md +- [x] [#2219](https://github.com/kubernetes/ingress-nginx/pull/2219) Fix log format documentation +- [x] [#2238](https://github.com/kubernetes/ingress-nginx/pull/2238) Correct typo +- [x] [#2239](https://github.com/kubernetes/ingress-nginx/pull/2239) fix-link +- [x] [#2240](https://github.com/kubernetes/ingress-nginx/pull/2240) fix:"any value other" should be "any other value" +- [x] [#2255](https://github.com/kubernetes/ingress-nginx/pull/2255) Update annotations.md +- [x] [#2267](https://github.com/kubernetes/ingress-nginx/pull/2267) Update README.md +- [x] [#2274](https://github.com/kubernetes/ingress-nginx/pull/2274) Typo fixes in modsecurity.md +- [x] [#2276](https://github.com/kubernetes/ingress-nginx/pull/2276) Update README.md +- [x] [#2282](https://github.com/kubernetes/ingress-nginx/pull/2282) Fix nlb instructions + +### 0.12.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.12.0` + +_New Features:_ + +- Live NGINX configuration update without reloading using the flag `--enable-dynamic-configuration` (disabled by default). +- New flag `--publish-status-address` to manually set the Ingress status IP address. +- Add worker-cpu-affinity NGINX option. +- Enable remote logging using syslog. +- Do not redirect `/.well-known/acme-challenge` to HTTPS. + +_Changes:_ + +- [x] [#2125](https://github.com/kubernetes/ingress-nginx/pull/2125) Add GCB config to build defaultbackend +- [x] [#2127](https://github.com/kubernetes/ingress-nginx/pull/2127) Revert deletion of dependency version override +- [x] [#2137](https://github.com/kubernetes/ingress-nginx/pull/2137) Updated log level to v2 for sysctlFSFileMax. 
+- [x] [#2140](https://github.com/kubernetes/ingress-nginx/pull/2140) Cors header should always be returned +- [x] [#2141](https://github.com/kubernetes/ingress-nginx/pull/2141) Fix error loading modules +- [x] [#2143](https://github.com/kubernetes/ingress-nginx/pull/2143) Only add HSTS headers in HTTPS +- [x] [#2144](https://github.com/kubernetes/ingress-nginx/pull/2144) Add annotation to disable logs in a location +- [x] [#2145](https://github.com/kubernetes/ingress-nginx/pull/2145) Add option in the configuration configmap to enable remote logging +- [x] [#2146](https://github.com/kubernetes/ingress-nginx/pull/2146) In case of TLS errors do not allow traffic +- [x] [#2148](https://github.com/kubernetes/ingress-nginx/pull/2148) Add publish-status-address flag +- [x] [#2155](https://github.com/kubernetes/ingress-nginx/pull/2155) Update nginx with new modules +- [x] [#2162](https://github.com/kubernetes/ingress-nginx/pull/2162) Remove duplicated BuildConfigFromFlags func +- [x] [#2163](https://github.com/kubernetes/ingress-nginx/pull/2163) include lua-upstream-nginx-module in Nginx build +- [x] [#2164](https://github.com/kubernetes/ingress-nginx/pull/2164) use the correct error channel +- [x] [#2167](https://github.com/kubernetes/ingress-nginx/pull/2167) configuring load balancing per ingress +- [x] [#2172](https://github.com/kubernetes/ingress-nginx/pull/2172) include lua-resty-lock in nginx image +- [x] [#2174](https://github.com/kubernetes/ingress-nginx/pull/2174) Live Nginx configuration update without reloading +- [x] [#2180](https://github.com/kubernetes/ingress-nginx/pull/2180) Include tests in golint checks, fix warnings +- [x] [#2181](https://github.com/kubernetes/ingress-nginx/pull/2181) change nginx process pgid +- [x] [#2185](https://github.com/kubernetes/ingress-nginx/pull/2185) Remove ProxyPassParams setting +- [x] [#2191](https://github.com/kubernetes/ingress-nginx/pull/2191) Add checker test for bad pid +- [x] [#2193](https://github.com/kubernetes/ingress-nginx/pull/2193) fix wrong json tag +- [x] [#2201](https://github.com/kubernetes/ingress-nginx/pull/2201) Add worker-cpu-affinity nginx option +- [x] [#2202](https://github.com/kubernetes/ingress-nginx/pull/2202) Allow config to disable geoip +- [x] [#2205](https://github.com/kubernetes/ingress-nginx/pull/2205) add luacheck to lint lua files + +_Documentation:_ + +- [x] [#2124](https://github.com/kubernetes/ingress-nginx/pull/2124) Document how to provide list types in configmap +- [x] [#2133](https://github.com/kubernetes/ingress-nginx/pull/2133) fix limit-req-status-code doc +- [x] [#2139](https://github.com/kubernetes/ingress-nginx/pull/2139) Update documentation for nginx-ingress-role RBAC. 
+- [x] [#2165](https://github.com/kubernetes/ingress-nginx/pull/2165) Typo fix "api server " -> "API server" +- [x] [#2169](https://github.com/kubernetes/ingress-nginx/pull/2169) Add documentation about secure-verify-ca-secret +- [x] [#2200](https://github.com/kubernetes/ingress-nginx/pull/2200) fix grammer mistake + +### 0.11.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.11.0` + +_New Features:_ + +- NGINX 1.13.9 + +_Changes:_ + +- [x] [#1992](https://github.com/kubernetes/ingress-nginx/pull/1992) Added configmap option to disable IPv6 in nginx DNS resolver +- [x] [#1993](https://github.com/kubernetes/ingress-nginx/pull/1993) Enable Customization of Auth Request Redirect +- [x] [#1996](https://github.com/kubernetes/ingress-nginx/pull/1996) Use v3/dev/performance of ModSecurity because of performance +- [x] [#1997](https://github.com/kubernetes/ingress-nginx/pull/1997) fix var checked +- [x] [#1998](https://github.com/kubernetes/ingress-nginx/pull/1998) Add support to enable/disable proxy buffering +- [x] [#1999](https://github.com/kubernetes/ingress-nginx/pull/1999) Add connection-proxy-header annotation +- [x] [#2001](https://github.com/kubernetes/ingress-nginx/pull/2001) Add limit-request-status-code option +- [x] [#2005](https://github.com/kubernetes/ingress-nginx/pull/2005) fix typo error for server name \_ +- [x] [#2006](https://github.com/kubernetes/ingress-nginx/pull/2006) Add support for enabling ssl_ciphers per host +- [x] [#2019](https://github.com/kubernetes/ingress-nginx/pull/2019) Update nginx image +- [x] [#2021](https://github.com/kubernetes/ingress-nginx/pull/2021) Add nginx_cookie_flag_module +- [x] [#2026](https://github.com/kubernetes/ingress-nginx/pull/2026) update KUBERNETES from v1.8.0 to 1.9.0 +- [x] [#2027](https://github.com/kubernetes/ingress-nginx/pull/2027) Show pod information in http-svc example +- [x] [#2030](https://github.com/kubernetes/ingress-nginx/pull/2030) do not ignore $http_host and $http_x_forwarded_host +- [x] [#2031](https://github.com/kubernetes/ingress-nginx/pull/2031) The maximum number of open file descriptors should be maxOpenFiles. +- [x] [#2036](https://github.com/kubernetes/ingress-nginx/pull/2036) add matchLabels in Deployment yaml, that both API extensions/v1beta1 … +- [x] [#2050](https://github.com/kubernetes/ingress-nginx/pull/2050) Get file max from fs/file-max. 
+- [x] [#2063](https://github.com/kubernetes/ingress-nginx/pull/2063) Run one test at a time +- [x] [#2065](https://github.com/kubernetes/ingress-nginx/pull/2065) Always return an IP address +- [x] [#2069](https://github.com/kubernetes/ingress-nginx/pull/2069) Do not cancel the synchronization of secrets +- [x] [#2071](https://github.com/kubernetes/ingress-nginx/pull/2071) Update Go to 1.9.4 +- [x] [#2082](https://github.com/kubernetes/ingress-nginx/pull/2082) Use a ring channel to avoid blocking write of events +- [x] [#2089](https://github.com/kubernetes/ingress-nginx/pull/2089) Retry initial connection to the Kubernetes cluster +- [x] [#2093](https://github.com/kubernetes/ingress-nginx/pull/2093) Only pods in running phase are vallid for status +- [x] [#2099](https://github.com/kubernetes/ingress-nginx/pull/2099) Added GeoIP Organisational data +- [x] [#2107](https://github.com/kubernetes/ingress-nginx/pull/2107) Enabled the dynamic reload of GeoIP data +- [x] [#2119](https://github.com/kubernetes/ingress-nginx/pull/2119) Remove deprecated flag disable-node-list +- [x] [#2120](https://github.com/kubernetes/ingress-nginx/pull/2120) Migrate to codecov.io + +_Documentation:_ + +- [x] [#1987](https://github.com/kubernetes/ingress-nginx/pull/1987) add kube-system namespace for oauth2-proxy example +- [x] [#1991](https://github.com/kubernetes/ingress-nginx/pull/1991) Add comment about bolean and number values +- [x] [#2009](https://github.com/kubernetes/ingress-nginx/pull/2009) docs/user-guide/tls: remove duplicated section +- [x] [#2011](https://github.com/kubernetes/ingress-nginx/pull/2011) broken link for sticky-ingress.yaml +- [x] [#2014](https://github.com/kubernetes/ingress-nginx/pull/2014) Add document for connection-proxy-header annotation +- [x] [#2016](https://github.com/kubernetes/ingress-nginx/pull/2016) Minor link fix in deployment docs +- [x] [#2018](https://github.com/kubernetes/ingress-nginx/pull/2018) Added documentation for Permanent Redirect +- [x] [#2035](https://github.com/kubernetes/ingress-nginx/pull/2035) fix broken links in static-ip readme +- [x] [#2038](https://github.com/kubernetes/ingress-nginx/pull/2038) fix typo: appropiate -> [appropriate] +- [x] [#2039](https://github.com/kubernetes/ingress-nginx/pull/2039) fix typo stickyness to stickiness +- [x] [#2040](https://github.com/kubernetes/ingress-nginx/pull/2040) fix wrong annotation +- [x] [#2041](https://github.com/kubernetes/ingress-nginx/pull/2041) fix spell error reslover -> resolver +- [x] [#2046](https://github.com/kubernetes/ingress-nginx/pull/2046) Fix typos +- [x] [#2054](https://github.com/kubernetes/ingress-nginx/pull/2054) Adding documentation for helm with RBAC enabled +- [x] [#2075](https://github.com/kubernetes/ingress-nginx/pull/2075) Fix opentracing configuration when multiple options are configured +- [x] [#2076](https://github.com/kubernetes/ingress-nginx/pull/2076) Fix spelling errors +- [x] [#2077](https://github.com/kubernetes/ingress-nginx/pull/2077) Remove initContainer from default deployment + +### 0.10.2 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.2` + +_Changes:_ + +- [x] [#1978](https://github.com/kubernetes/ingress-nginx/pull/1978) Fix chain completion and default certificate flag issues + +### 0.10.1 + +**Image:** 
`quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.1` + +_Changes:_ + +- [x] [#1945](https://github.com/kubernetes/ingress-nginx/pull/1945) When a secret is updated read ingress annotations (again) +- [x] [#1948](https://github.com/kubernetes/ingress-nginx/pull/1948) Update go to 1.9.3 +- [x] [#1953](https://github.com/kubernetes/ingress-nginx/pull/1953) Added annotation for upstream-vhost +- [x] [#1960](https://github.com/kubernetes/ingress-nginx/pull/1960) Adjust sysctl values to improve nginx performance +- [x] [#1963](https://github.com/kubernetes/ingress-nginx/pull/1963) Fix tests +- [x] [#1969](https://github.com/kubernetes/ingress-nginx/pull/1969) Rollback #1854 +- [x] [#1970](https://github.com/kubernetes/ingress-nginx/pull/1970) By default brotli is disabled + +### 0.10.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.0` + +_Breaking changes:_ + +Changed the names of the default NGINX ingress Prometheus metrics. +If you are scraping these metrics with Prometheus, the renames are as follows: + +``` +nginx_active_connections_total -> nginx_connections_total{state="active"} +nginx_accepted_connections_total -> nginx_connections_total{state="accepted"} +nginx_handled_connections_total -> nginx_connections_total{state="handled"} +nginx_current_reading_connections_total -> nginx_connections{state="reading"} +nginx_current_writing_connections_total -> nginx_connections{state="writing"} +current_waiting_connections_total -> nginx_connections{state="waiting"} +``` + +_New Features:_ + +- NGINX 1.13.8 +- Support to hide headers from upstream servers +- Support for Jaeger +- CORS max age annotation + +_Changes:_ + +- [x] [#1782](https://github.com/kubernetes/ingress-nginx/pull/1782) auth-tls-pass-certificate-to-upstream should be bool +- [x] [#1787](https://github.com/kubernetes/ingress-nginx/pull/1787) force external_auth requests to http/1.1 +- [x] [#1800](https://github.com/kubernetes/ingress-nginx/pull/1800) Add control of the configuration refresh interval +- [x] [#1805](https://github.com/kubernetes/ingress-nginx/pull/1805) Add X-Forwarded-Prefix on rewrites +- [x] [#1844](https://github.com/kubernetes/ingress-nginx/pull/1844) Validate x-forwarded-proto and connection scheme before redirect to https +- [x] [#1852](https://github.com/kubernetes/ingress-nginx/pull/1852) Update nginx to v1.13.8 and update modules +- [x] [#1854](https://github.com/kubernetes/ingress-nginx/pull/1854) Fix redirect to ssl +- [x] [#1858](https://github.com/kubernetes/ingress-nginx/pull/1858) When upstream-hash-by annotation is used do not configure a lb algorithm +- [x] [#1861](https://github.com/kubernetes/ingress-nginx/pull/1861) Improve speed of tests execution +- [x] [#1869](https://github.com/kubernetes/ingress-nginx/pull/1869) "proxy_redirect default" should be placed after the "proxy_pass" +- [x] [#1870](https://github.com/kubernetes/ingress-nginx/pull/1870) Fix SSL Passthrough template issue and custom ports in redirect to HTTPS +- [x] [#1871](https://github.com/kubernetes/ingress-nginx/pull/1871) Update nginx image to 0.31 +- [x] [#1872](https://github.com/kubernetes/ingress-nginx/pull/1872) Fix data race updating ingress status +- [x] [#1880](https://github.com/kubernetes/ingress-nginx/pull/1880) Update go dependencies and cleanup deprecated packages +- [x] 
[#1888](https://github.com/kubernetes/ingress-nginx/pull/1888) Add CORS max age annotation +- [x] [#1891](https://github.com/kubernetes/ingress-nginx/pull/1891) Refactor initial synchronization of ingress objects +- [x] [#1903](https://github.com/kubernetes/ingress-nginx/pull/1903) If server_tokens is disabled remove the Server header +- [x] [#1906](https://github.com/kubernetes/ingress-nginx/pull/1906) Random string function should only contains letters +- [x] [#1907](https://github.com/kubernetes/ingress-nginx/pull/1907) Fix custom port in redirects +- [x] [#1909](https://github.com/kubernetes/ingress-nginx/pull/1909) Release nginx 0.32 +- [x] [#1910](https://github.com/kubernetes/ingress-nginx/pull/1910) updating prometheus metrics names according to naming best practices +- [x] [#1912](https://github.com/kubernetes/ingress-nginx/pull/1912) removing \_total prefix from nginx guage metrics +- [x] [#1914](https://github.com/kubernetes/ingress-nginx/pull/1914) Add --with-http_secure_link_module for the Nginx build configuration +- [x] [#1916](https://github.com/kubernetes/ingress-nginx/pull/1916) Add support for jaeger backend +- [x] [#1918](https://github.com/kubernetes/ingress-nginx/pull/1918) Update nginx image to 0.32 +- [x] [#1919](https://github.com/kubernetes/ingress-nginx/pull/1919) Add option for reuseport in nginx listen section +- [x] [#1926](https://github.com/kubernetes/ingress-nginx/pull/1926) Do not use port from host header +- [x] [#1927](https://github.com/kubernetes/ingress-nginx/pull/1927) Remove sendfile configuration +- [x] [#1928](https://github.com/kubernetes/ingress-nginx/pull/1928) Add support to hide headers from upstream servers +- [x] [#1929](https://github.com/kubernetes/ingress-nginx/pull/1929) Refactoring of kubernetes informers and local caches +- [x] [#1933](https://github.com/kubernetes/ingress-nginx/pull/1933) Remove deploy of ingress controller from the example + +_Documentation:_ + +- [x] [#1786](https://github.com/kubernetes/ingress-nginx/pull/1786) fix: some typo. 
+- [x] [#1792](https://github.com/kubernetes/ingress-nginx/pull/1792) Add note about annotation values +- [x] [#1814](https://github.com/kubernetes/ingress-nginx/pull/1814) Fix link to custom configuration +- [x] [#1826](https://github.com/kubernetes/ingress-nginx/pull/1826) Add note about websocket and load balancers +- [x] [#1840](https://github.com/kubernetes/ingress-nginx/pull/1840) Add note about default log files +- [x] [#1853](https://github.com/kubernetes/ingress-nginx/pull/1853) Clarify docs for add-headers and proxy-set-headers +- [x] [#1864](https://github.com/kubernetes/ingress-nginx/pull/1864) configmap.md: Convert hyphens in name column to non-breaking-hyphens +- [x] [#1865](https://github.com/kubernetes/ingress-nginx/pull/1865) Add docs for legacy TLS version and ciphers +- [x] [#1867](https://github.com/kubernetes/ingress-nginx/pull/1867) Fix publish-service patch and update README +- [x] [#1913](https://github.com/kubernetes/ingress-nginx/pull/1913) Missing r +- [x] [#1925](https://github.com/kubernetes/ingress-nginx/pull/1925) Fix doc links + +### 0.9.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0` + +_Changes:_ + +- [x] [#1731](https://github.com/kubernetes/ingress-nginx/pull/1731) Allow configuration of proxy_responses value for tcp/udp configmaps +- [x] [#1766](https://github.com/kubernetes/ingress-nginx/pull/1766) Fix ingress typo +- [x] [#1768](https://github.com/kubernetes/ingress-nginx/pull/1768) Custom default backend must use annotations if present +- [x] [#1769](https://github.com/kubernetes/ingress-nginx/pull/1769) Use custom https port in redirects +- [x] [#1771](https://github.com/kubernetes/ingress-nginx/pull/1771) Add additional check for old SSL certificates +- [x] [#1776](https://github.com/kubernetes/ingress-nginx/pull/1776) Add option to configure the redirect code + ### 0.9-beta.19 -**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19` +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19` -*Changes:* +_Changes:_ - Fix regression with ingress.class annotation introduced in 0.9-beta.18 ### 0.9-beta.18 -**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.18` +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.18` -*Breaking changes:* +_Breaking changes:_ - The NGINX ingress annotations contain a new prefix: **nginx.ingress.kubernetes.io**. This change is behind a flag to avoid breaking running deployments. To avoid breaking a running NGINX ingress controller, add the flag **--annotations-prefix=ingress.kubernetes.io** to the nginx ingress controller deployment.
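+
+For example, a hypothetical excerpt of the controller Deployment keeping the legacy prefix (the container name is illustrative):
+
+```yaml
+containers:
+  - name: nginx-ingress-controller
+    image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.18
+    args:
+      - /nginx-ingress-controller
+      # keep pre-0.9-beta.18 `ingress.kubernetes.io/*` annotations working
+      - --annotations-prefix=ingress.kubernetes.io
+```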
There is one exception, the annotation `kubernetes.io/ingress.class` remains unchanged (this annotation is used in multiple ingress controllers) -*New Features:* +_New Features:_ - NGINX 1.13.7 - Support for s390x - e2e tests -*Changes:* - -- [X] [#1648](https://github.com/kubernetes/ingress-nginx/pull/1648) Remove GenericController and add tests -- [X] [#1650](https://github.com/kubernetes/ingress-nginx/pull/1650) Fix misspell errors -- [X] [#1651](https://github.com/kubernetes/ingress-nginx/pull/1651) Remove node lister -- [X] [#1652](https://github.com/kubernetes/ingress-nginx/pull/1652) Remove node lister -- [X] [#1653](https://github.com/kubernetes/ingress-nginx/pull/1653) Fix diff execution -- [X] [#1654](https://github.com/kubernetes/ingress-nginx/pull/1654) Fix travis script and update kubernetes to 1.8.0 -- [X] [#1658](https://github.com/kubernetes/ingress-nginx/pull/1658) Tests -- [X] [#1659](https://github.com/kubernetes/ingress-nginx/pull/1659) Add nginx helper tests -- [X] [#1662](https://github.com/kubernetes/ingress-nginx/pull/1662) Refactor annotations -- [X] [#1665](https://github.com/kubernetes/ingress-nginx/pull/1665) Add the original http request method to the auth request -- [X] [#1687](https://github.com/kubernetes/ingress-nginx/pull/1687) Fix use merge of annotations -- [X] [#1689](https://github.com/kubernetes/ingress-nginx/pull/1689) Enable s390x -- [X] [#1693](https://github.com/kubernetes/ingress-nginx/pull/1693) Fix docker build -- [X] [#1695](https://github.com/kubernetes/ingress-nginx/pull/1695) Update nginx to v0.29 -- [X] [#1696](https://github.com/kubernetes/ingress-nginx/pull/1696) Always add cors headers when enabled -- [X] [#1697](https://github.com/kubernetes/ingress-nginx/pull/1697) Disable features not availables in some platforms -- [X] [#1698](https://github.com/kubernetes/ingress-nginx/pull/1698) Auth e2e tests -- [X] [#1699](https://github.com/kubernetes/ingress-nginx/pull/1699) Refactor SSL intermediate CA certificate check -- [X] [#1700](https://github.com/kubernetes/ingress-nginx/pull/1700) Add patch command to append publish-service flag -- [X] [#1701](https://github.com/kubernetes/ingress-nginx/pull/1701) fix: Core() is deprecated use CoreV1() instead. 
-- [X] [#1702](https://github.com/kubernetes/ingress-nginx/pull/1702) Fix TLS example [ci skip] -- [X] [#1704](https://github.com/kubernetes/ingress-nginx/pull/1704) Add e2e tests to verify the correct source IP address -- [X] [#1705](https://github.com/kubernetes/ingress-nginx/pull/1705) Add annotation for setting proxy_redirect -- [X] [#1706](https://github.com/kubernetes/ingress-nginx/pull/1706) Increase ELB idle timeouts [ci skip] -- [X] [#1710](https://github.com/kubernetes/ingress-nginx/pull/1710) Do not update a secret not referenced by ingress rules -- [X] [#1713](https://github.com/kubernetes/ingress-nginx/pull/1713) add --report-node-internal-ip-address describe to cli-arguments.md -- [X] [#1717](https://github.com/kubernetes/ingress-nginx/pull/1717) Fix command used to detect version -- [X] [#1720](https://github.com/kubernetes/ingress-nginx/pull/1720) Add docker-registry example [ci skip] -- [X] [#1722](https://github.com/kubernetes/ingress-nginx/pull/1722) Add annotation to enable passing the certificate to the upstream server -- [X] [#1723](https://github.com/kubernetes/ingress-nginx/pull/1723) Add timeouts to http server and additional pprof routes -- [X] [#1724](https://github.com/kubernetes/ingress-nginx/pull/1724) Cleanup main -- [X] [#1725](https://github.com/kubernetes/ingress-nginx/pull/1725) Enable all e2e tests -- [X] [#1726](https://github.com/kubernetes/ingress-nginx/pull/1726) fix: replace deprecated methods. -- [X] [#1734](https://github.com/kubernetes/ingress-nginx/pull/1734) Changes ssl-client-cert header -- [X] [#1737](https://github.com/kubernetes/ingress-nginx/pull/1737) Update nginx v1.13.7 -- [X] [#1738](https://github.com/kubernetes/ingress-nginx/pull/1738) Cleanup -- [X] [#1739](https://github.com/kubernetes/ingress-nginx/pull/1739) Improve e2e checks -- [X] [#1740](https://github.com/kubernetes/ingress-nginx/pull/1740) Update nginx -- [X] [#1745](https://github.com/kubernetes/ingress-nginx/pull/1745) Simplify annotations -- [X] [#1746](https://github.com/kubernetes/ingress-nginx/pull/1746) Cleanup of e2e helpers - -*Documentation:* - -- [X] [#1657](https://github.com/kubernetes/ingress-nginx/pull/1657) Add better documentation for deploying for dev -- [X] [#1680](https://github.com/kubernetes/ingress-nginx/pull/1680) Add doc for log-format-escape-json [ci skip] -- [X] [#1685](https://github.com/kubernetes/ingress-nginx/pull/1685) Fix default SSL certificate flag docs [ci skip] -- [X] [#1686](https://github.com/kubernetes/ingress-nginx/pull/1686) Fix development doc [ci skip] -- [X] [#1727](https://github.com/kubernetes/ingress-nginx/pull/1727) fix: fix typos in docs. 
-- [X] [#1747](https://github.com/kubernetes/ingress-nginx/pull/1747) Add config-map usage and options to Documentation - +_Changes:_ + +- [x] [#1648](https://github.com/kubernetes/ingress-nginx/pull/1648) Remove GenericController and add tests +- [x] [#1650](https://github.com/kubernetes/ingress-nginx/pull/1650) Fix misspell errors +- [x] [#1651](https://github.com/kubernetes/ingress-nginx/pull/1651) Remove node lister +- [x] [#1652](https://github.com/kubernetes/ingress-nginx/pull/1652) Remove node lister +- [x] [#1653](https://github.com/kubernetes/ingress-nginx/pull/1653) Fix diff execution +- [x] [#1654](https://github.com/kubernetes/ingress-nginx/pull/1654) Fix travis script and update kubernetes to 1.8.0 +- [x] [#1658](https://github.com/kubernetes/ingress-nginx/pull/1658) Tests +- [x] [#1659](https://github.com/kubernetes/ingress-nginx/pull/1659) Add nginx helper tests +- [x] [#1662](https://github.com/kubernetes/ingress-nginx/pull/1662) Refactor annotations +- [x] [#1665](https://github.com/kubernetes/ingress-nginx/pull/1665) Add the original http request method to the auth request +- [x] [#1687](https://github.com/kubernetes/ingress-nginx/pull/1687) Fix use merge of annotations +- [x] [#1689](https://github.com/kubernetes/ingress-nginx/pull/1689) Enable s390x +- [x] [#1693](https://github.com/kubernetes/ingress-nginx/pull/1693) Fix docker build +- [x] [#1695](https://github.com/kubernetes/ingress-nginx/pull/1695) Update nginx to v0.29 +- [x] [#1696](https://github.com/kubernetes/ingress-nginx/pull/1696) Always add cors headers when enabled +- [x] [#1697](https://github.com/kubernetes/ingress-nginx/pull/1697) Disable features not available in some platforms +- [x] [#1698](https://github.com/kubernetes/ingress-nginx/pull/1698) Auth e2e tests +- [x] [#1699](https://github.com/kubernetes/ingress-nginx/pull/1699) Refactor SSL intermediate CA certificate check +- [x] [#1700](https://github.com/kubernetes/ingress-nginx/pull/1700) Add patch command to append publish-service flag +- [x] [#1701](https://github.com/kubernetes/ingress-nginx/pull/1701) fix: Core() is deprecated use CoreV1() instead.
+- [x] [#1702](https://github.com/kubernetes/ingress-nginx/pull/1702) Fix TLS example [ci skip] +- [x] [#1704](https://github.com/kubernetes/ingress-nginx/pull/1704) Add e2e tests to verify the correct source IP address +- [x] [#1705](https://github.com/kubernetes/ingress-nginx/pull/1705) Add annotation for setting proxy_redirect +- [x] [#1706](https://github.com/kubernetes/ingress-nginx/pull/1706) Increase ELB idle timeouts [ci skip] +- [x] [#1710](https://github.com/kubernetes/ingress-nginx/pull/1710) Do not update a secret not referenced by ingress rules +- [x] [#1713](https://github.com/kubernetes/ingress-nginx/pull/1713) add --report-node-internal-ip-address describe to cli-arguments.md +- [x] [#1717](https://github.com/kubernetes/ingress-nginx/pull/1717) Fix command used to detect version +- [x] [#1720](https://github.com/kubernetes/ingress-nginx/pull/1720) Add docker-registry example [ci skip] +- [x] [#1722](https://github.com/kubernetes/ingress-nginx/pull/1722) Add annotation to enable passing the certificate to the upstream server +- [x] [#1723](https://github.com/kubernetes/ingress-nginx/pull/1723) Add timeouts to http server and additional pprof routes +- [x] [#1724](https://github.com/kubernetes/ingress-nginx/pull/1724) Cleanup main +- [x] [#1725](https://github.com/kubernetes/ingress-nginx/pull/1725) Enable all e2e tests +- [x] [#1726](https://github.com/kubernetes/ingress-nginx/pull/1726) fix: replace deprecated methods. +- [x] [#1734](https://github.com/kubernetes/ingress-nginx/pull/1734) Changes ssl-client-cert header +- [x] [#1737](https://github.com/kubernetes/ingress-nginx/pull/1737) Update nginx v1.13.7 +- [x] [#1738](https://github.com/kubernetes/ingress-nginx/pull/1738) Cleanup +- [x] [#1739](https://github.com/kubernetes/ingress-nginx/pull/1739) Improve e2e checks +- [x] [#1740](https://github.com/kubernetes/ingress-nginx/pull/1740) Update nginx +- [x] [#1745](https://github.com/kubernetes/ingress-nginx/pull/1745) Simplify annotations +- [x] [#1746](https://github.com/kubernetes/ingress-nginx/pull/1746) Cleanup of e2e helpers + +_Documentation:_ + +- [x] [#1657](https://github.com/kubernetes/ingress-nginx/pull/1657) Add better documentation for deploying for dev +- [x] [#1680](https://github.com/kubernetes/ingress-nginx/pull/1680) Add doc for log-format-escape-json [ci skip] +- [x] [#1685](https://github.com/kubernetes/ingress-nginx/pull/1685) Fix default SSL certificate flag docs [ci skip] +- [x] [#1686](https://github.com/kubernetes/ingress-nginx/pull/1686) Fix development doc [ci skip] +- [x] [#1727](https://github.com/kubernetes/ingress-nginx/pull/1727) fix: fix typos in docs. 
+- [x] [#1747](https://github.com/kubernetes/ingress-nginx/pull/1747) Add config-map usage and options to Documentation ### 0.9-beta.17 -**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.17` +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.17` -*Changes:* +_Changes:_ - Fix regression with annotations introduced in 0.9-beta.16 (thanks @tomlanyon) ### 0.9-beta.16 -**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.16` +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.16` -*New Features:* +_New Features:_ - Images are published to [quay.io](https://quay.io/repository/kubernetes-ingress-controller) - NGINX 1.13.6 @@ -98,234 +1360,231 @@ - Support for [brotli compression in NGINX](https://certsimple.com/blog/nginx-brotli) - Return 503 error instead of 404 when no endpoint is available -*Breaking changes:* +_Breaking changes:_ - The default SSL configuration was updated to use `TLSv1.2` and the default cipher list is `ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256` -*Known issues:* +_Known issues:_ - When ModSecurity is enabled a segfault could occur - [ModSecurity#1590](https://github.com/SpiderLabs/ModSecurity/issues/1590) -*Changes:* - -- [X] [#1489](https://github.com/kubernetes/ingress-nginx/pull/1489) Compute a real `X-Forwarded-For` header -- [X] [#1490](https://github.com/kubernetes/ingress-nginx/pull/1490) Introduce an upstream-hash-by annotation to support consistent hashing by nginx variable or text -- [X] [#1498](https://github.com/kubernetes/ingress-nginx/pull/1498) Add modsecurity module -- [X] [#1500](https://github.com/kubernetes/ingress-nginx/pull/1500) Enable modsecurity feature -- [X] [#1501](https://github.com/kubernetes/ingress-nginx/pull/1501) Request ingress controller version in issue template -- [X] [#1502](https://github.com/kubernetes/ingress-nginx/pull/1502) Force reload on template change -- [X] [#1503](https://github.com/kubernetes/ingress-nginx/pull/1503) Add falg to report node internal IP address in ingress status -- [X] [#1505](https://github.com/kubernetes/ingress-nginx/pull/1505) Increase size of variable hash bucket -- [X] [#1506](https://github.com/kubernetes/ingress-nginx/pull/1506) Update nginx ssl configuration -- [X] [#1507](https://github.com/kubernetes/ingress-nginx/pull/1507) Add tls session ticket key setting -- [X] [#1511](https://github.com/kubernetes/ingress-nginx/pull/1511) fix deprecated ssl_client_cert. 
add ssl_client_verify header -- [X] [#1513](https://github.com/kubernetes/ingress-nginx/pull/1513) Return 503 by default when no endpoint is available -- [X] [#1520](https://github.com/kubernetes/ingress-nginx/pull/1520) Change alias behaviour not to create new server section needlessly -- [X] [#1523](https://github.com/kubernetes/ingress-nginx/pull/1523) Include the serversnippet from the config map in server blocks -- [X] [#1533](https://github.com/kubernetes/ingress-nginx/pull/1533) Remove authentication send body annotation -- [X] [#1535](https://github.com/kubernetes/ingress-nginx/pull/1535) Remove auth-send-body [ci skip] -- [X] [#1538](https://github.com/kubernetes/ingress-nginx/pull/1538) Rename service-nodeport.yml to service-nodeport.yaml -- [X] [#1543](https://github.com/kubernetes/ingress-nginx/pull/1543) Fix glog initialization error -- [X] [#1544](https://github.com/kubernetes/ingress-nginx/pull/1544) Fix `make container` for OSX. -- [X] [#1547](https://github.com/kubernetes/ingress-nginx/pull/1547) fix broken GCE-GKE service descriptor -- [X] [#1550](https://github.com/kubernetes/ingress-nginx/pull/1550) Add e2e tests - default backend -- [X] [#1553](https://github.com/kubernetes/ingress-nginx/pull/1553) Cors features improvements -- [X] [#1554](https://github.com/kubernetes/ingress-nginx/pull/1554) Add missing unit test for nextPowerOf2 function -- [X] [#1556](https://github.com/kubernetes/ingress-nginx/pull/1556) fixed https port forwarding in Azure LB service -- [X] [#1566](https://github.com/kubernetes/ingress-nginx/pull/1566) Release nginx-slim 0.27 -- [X] [#1568](https://github.com/kubernetes/ingress-nginx/pull/1568) update defaultbackend tag -- [X] [#1569](https://github.com/kubernetes/ingress-nginx/pull/1569) Update 404 server image -- [X] [#1570](https://github.com/kubernetes/ingress-nginx/pull/1570) Update nginx version -- [X] [#1571](https://github.com/kubernetes/ingress-nginx/pull/1571) Fix cors tests -- [X] [#1572](https://github.com/kubernetes/ingress-nginx/pull/1572) Certificate Auth Bugfix -- [X] [#1577](https://github.com/kubernetes/ingress-nginx/pull/1577) Do not use relative urls for yaml files -- [X] [#1580](https://github.com/kubernetes/ingress-nginx/pull/1580) Upgrade to use the latest version of nginx-opentracing. -- [X] [#1581](https://github.com/kubernetes/ingress-nginx/pull/1581) Fix Makefile to work in OSX. 
-- [X] [#1582](https://github.com/kubernetes/ingress-nginx/pull/1582) Add scripts to release from travis-ci -- [X] [#1584](https://github.com/kubernetes/ingress-nginx/pull/1584) Add missing probes in deployments -- [X] [#1585](https://github.com/kubernetes/ingress-nginx/pull/1585) Add version flag -- [X] [#1587](https://github.com/kubernetes/ingress-nginx/pull/1587) Use pass access scheme in signin url -- [X] [#1589](https://github.com/kubernetes/ingress-nginx/pull/1589) Fix upstream vhost Equal comparison -- [X] [#1590](https://github.com/kubernetes/ingress-nginx/pull/1590) Fix Equals Comparison for CORS annotation -- [X] [#1592](https://github.com/kubernetes/ingress-nginx/pull/1592) Update opentracing module and release image to quay.io -- [X] [#1593](https://github.com/kubernetes/ingress-nginx/pull/1593) Fix makefile default task -- [X] [#1605](https://github.com/kubernetes/ingress-nginx/pull/1605) Fix ExternalName services -- [X] [#1607](https://github.com/kubernetes/ingress-nginx/pull/1607) Add support for named ports with service-upstream. #1459 -- [X] [#1608](https://github.com/kubernetes/ingress-nginx/pull/1608) Fix issue with clusterIP detection on service upstream. #1534 -- [X] [#1610](https://github.com/kubernetes/ingress-nginx/pull/1610) Only set alias if not already set -- [X] [#1618](https://github.com/kubernetes/ingress-nginx/pull/1618) Fix full XFF with PROXY -- [X] [#1620](https://github.com/kubernetes/ingress-nginx/pull/1620) Add gzip_vary -- [X] [#1621](https://github.com/kubernetes/ingress-nginx/pull/1621) Fix path to ELB listener image -- [X] [#1627](https://github.com/kubernetes/ingress-nginx/pull/1627) Add brotli support -- [X] [#1629](https://github.com/kubernetes/ingress-nginx/pull/1629) Add ssl-client-dn header -- [X] [#1632](https://github.com/kubernetes/ingress-nginx/pull/1632) Rename OWNERS assignees: to approvers: -- [X] [#1635](https://github.com/kubernetes/ingress-nginx/pull/1635) Install dumb-init using apt-get -- [X] [#1636](https://github.com/kubernetes/ingress-nginx/pull/1636) Update go to 1.9.2 -- [X] [#1640](https://github.com/kubernetes/ingress-nginx/pull/1640) Update nginx to 0.28 and enable brotli - -*Documentation:* - -- [X] [#1491](https://github.com/kubernetes/ingress-nginx/pull/1491) Note that GCE has moved to a new repo -- [X] [#1492](https://github.com/kubernetes/ingress-nginx/pull/1492) Cleanup readme.md -- [X] [#1494](https://github.com/kubernetes/ingress-nginx/pull/1494) Cleanup -- [X] [#1497](https://github.com/kubernetes/ingress-nginx/pull/1497) Cleanup examples directory -- [X] [#1504](https://github.com/kubernetes/ingress-nginx/pull/1504) Clean readme -- [X] [#1508](https://github.com/kubernetes/ingress-nginx/pull/1508) Fixed link in prometheus example -- [X] [#1527](https://github.com/kubernetes/ingress-nginx/pull/1527) Split documentation -- [X] [#1536](https://github.com/kubernetes/ingress-nginx/pull/1536) Update documentation and examples [ci skip] -- [X] [#1541](https://github.com/kubernetes/ingress-nginx/pull/1541) fix(documentation): Fix some typos -- [X] [#1548](https://github.com/kubernetes/ingress-nginx/pull/1548) link to prometheus docs -- [X] [#1562](https://github.com/kubernetes/ingress-nginx/pull/1562) Fix development guide link -- [X] 
[#1563](https://github.com/kubernetes/ingress-nginx/pull/1563) Add task to verify markdown links -- [X] [#1583](https://github.com/kubernetes/ingress-nginx/pull/1583) Add note for certificate authentication in Cloudflare -- [X] [#1617](https://github.com/kubernetes/ingress-nginx/pull/1617) fix typo in user-guide/annotations.md +_Changes:_ + +- [x] [#1489](https://github.com/kubernetes/ingress-nginx/pull/1489) Compute a real `X-Forwarded-For` header +- [x] [#1490](https://github.com/kubernetes/ingress-nginx/pull/1490) Introduce an upstream-hash-by annotation to support consistent hashing by nginx variable or text (see the usage sketch below) +- [x] [#1498](https://github.com/kubernetes/ingress-nginx/pull/1498) Add modsecurity module +- [x] [#1500](https://github.com/kubernetes/ingress-nginx/pull/1500) Enable modsecurity feature +- [x] [#1501](https://github.com/kubernetes/ingress-nginx/pull/1501) Request ingress controller version in issue template +- [x] [#1502](https://github.com/kubernetes/ingress-nginx/pull/1502) Force reload on template change +- [x] [#1503](https://github.com/kubernetes/ingress-nginx/pull/1503) Add flag to report node internal IP address in ingress status +- [x] [#1505](https://github.com/kubernetes/ingress-nginx/pull/1505) Increase size of variable hash bucket +- [x] [#1506](https://github.com/kubernetes/ingress-nginx/pull/1506) Update nginx ssl configuration +- [x] [#1507](https://github.com/kubernetes/ingress-nginx/pull/1507) Add tls session ticket key setting +- [x] [#1511](https://github.com/kubernetes/ingress-nginx/pull/1511) fix deprecated ssl_client_cert. add ssl_client_verify header +- [x] [#1513](https://github.com/kubernetes/ingress-nginx/pull/1513) Return 503 by default when no endpoint is available +- [x] [#1520](https://github.com/kubernetes/ingress-nginx/pull/1520) Change alias behaviour not to create new server section needlessly +- [x] [#1523](https://github.com/kubernetes/ingress-nginx/pull/1523) Include the serversnippet from the config map in server blocks +- [x] [#1533](https://github.com/kubernetes/ingress-nginx/pull/1533) Remove authentication send body annotation +- [x] [#1535](https://github.com/kubernetes/ingress-nginx/pull/1535) Remove auth-send-body [ci skip] +- [x] [#1538](https://github.com/kubernetes/ingress-nginx/pull/1538) Rename service-nodeport.yml to service-nodeport.yaml +- [x] [#1543](https://github.com/kubernetes/ingress-nginx/pull/1543) Fix glog initialization error +- [x] [#1544](https://github.com/kubernetes/ingress-nginx/pull/1544) Fix `make container` for OSX.
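As a usage sketch for the `upstream-hash-by` annotation from [#1490](https://github.com/kubernetes/ingress-nginx/pull/1490) above (resource names are illustrative, and the annotation key assumes the `ingress.kubernetes.io` prefix that was current before 0.9-beta.18):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    # Hash upstream selection on the request URI; a literal string or
    # another nginx variable should work the same way.
    ingress.kubernetes.io/upstream-hash-by: "$request_uri"
spec:
  rules:
    - host: example.com
      http:
        paths:
          - path: /
            backend:
              serviceName: example-svc
              servicePort: 80
```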
+- [x] [#1547](https://github.com/kubernetes/ingress-nginx/pull/1547) fix broken GCE-GKE service descriptor +- [x] [#1550](https://github.com/kubernetes/ingress-nginx/pull/1550) Add e2e tests - default backend +- [x] [#1553](https://github.com/kubernetes/ingress-nginx/pull/1553) Cors features improvements +- [x] [#1554](https://github.com/kubernetes/ingress-nginx/pull/1554) Add missing unit test for nextPowerOf2 function +- [x] [#1556](https://github.com/kubernetes/ingress-nginx/pull/1556) fixed https port forwarding in Azure LB service +- [x] [#1566](https://github.com/kubernetes/ingress-nginx/pull/1566) Release nginx-slim 0.27 +- [x] [#1568](https://github.com/kubernetes/ingress-nginx/pull/1568) update defaultbackend tag +- [x] [#1569](https://github.com/kubernetes/ingress-nginx/pull/1569) Update 404 server image +- [x] [#1570](https://github.com/kubernetes/ingress-nginx/pull/1570) Update nginx version +- [x] [#1571](https://github.com/kubernetes/ingress-nginx/pull/1571) Fix cors tests +- [x] [#1572](https://github.com/kubernetes/ingress-nginx/pull/1572) Certificate Auth Bugfix +- [x] [#1577](https://github.com/kubernetes/ingress-nginx/pull/1577) Do not use relative urls for yaml files +- [x] [#1580](https://github.com/kubernetes/ingress-nginx/pull/1580) Upgrade to use the latest version of nginx-opentracing. +- [x] [#1581](https://github.com/kubernetes/ingress-nginx/pull/1581) Fix Makefile to work in OSX. +- [x] [#1582](https://github.com/kubernetes/ingress-nginx/pull/1582) Add scripts to release from travis-ci +- [x] [#1584](https://github.com/kubernetes/ingress-nginx/pull/1584) Add missing probes in deployments +- [x] [#1585](https://github.com/kubernetes/ingress-nginx/pull/1585) Add version flag +- [x] [#1587](https://github.com/kubernetes/ingress-nginx/pull/1587) Use pass access scheme in signin url +- [x] [#1589](https://github.com/kubernetes/ingress-nginx/pull/1589) Fix upstream vhost Equal comparison +- [x] [#1590](https://github.com/kubernetes/ingress-nginx/pull/1590) Fix Equals Comparison for CORS annotation +- [x] [#1592](https://github.com/kubernetes/ingress-nginx/pull/1592) Update opentracing module and release image to quay.io +- [x] [#1593](https://github.com/kubernetes/ingress-nginx/pull/1593) Fix makefile default task +- [x] [#1605](https://github.com/kubernetes/ingress-nginx/pull/1605) Fix ExternalName services +- [x] [#1607](https://github.com/kubernetes/ingress-nginx/pull/1607) Add support for named ports with service-upstream. #1459 +- [x] [#1608](https://github.com/kubernetes/ingress-nginx/pull/1608) Fix issue with clusterIP detection on service upstream. 
#1534 +- [x] [#1610](https://github.com/kubernetes/ingress-nginx/pull/1610) Only set alias if not already set +- [x] [#1618](https://github.com/kubernetes/ingress-nginx/pull/1618) Fix full XFF with PROXY +- [x] [#1620](https://github.com/kubernetes/ingress-nginx/pull/1620) Add gzip_vary +- [x] [#1621](https://github.com/kubernetes/ingress-nginx/pull/1621) Fix path to ELB listener image +- [x] [#1627](https://github.com/kubernetes/ingress-nginx/pull/1627) Add brotli support +- [x] [#1629](https://github.com/kubernetes/ingress-nginx/pull/1629) Add ssl-client-dn header +- [x] [#1632](https://github.com/kubernetes/ingress-nginx/pull/1632) Rename OWNERS assignees: to approvers: +- [x] [#1635](https://github.com/kubernetes/ingress-nginx/pull/1635) Install dumb-init using apt-get +- [x] [#1636](https://github.com/kubernetes/ingress-nginx/pull/1636) Update go to 1.9.2 +- [x] [#1640](https://github.com/kubernetes/ingress-nginx/pull/1640) Update nginx to 0.28 and enable brotli + +_Documentation:_ + +- [x] [#1491](https://github.com/kubernetes/ingress-nginx/pull/1491) Note that GCE has moved to a new repo +- [x] [#1492](https://github.com/kubernetes/ingress-nginx/pull/1492) Cleanup readme.md +- [x] [#1494](https://github.com/kubernetes/ingress-nginx/pull/1494) Cleanup +- [x] [#1497](https://github.com/kubernetes/ingress-nginx/pull/1497) Cleanup examples directory +- [x] [#1504](https://github.com/kubernetes/ingress-nginx/pull/1504) Clean readme +- [x] [#1508](https://github.com/kubernetes/ingress-nginx/pull/1508) Fixed link in prometheus example +- [x] [#1527](https://github.com/kubernetes/ingress-nginx/pull/1527) Split documentation +- [x] [#1536](https://github.com/kubernetes/ingress-nginx/pull/1536) Update documentation and examples [ci skip] +- [x] [#1541](https://github.com/kubernetes/ingress-nginx/pull/1541) fix(documentation): Fix some typos +- [x] [#1548](https://github.com/kubernetes/ingress-nginx/pull/1548) link to prometheus docs +- [x] [#1562](https://github.com/kubernetes/ingress-nginx/pull/1562) Fix development guide link +- [x] [#1563](https://github.com/kubernetes/ingress-nginx/pull/1563) Add task to verify markdown links +- [x] [#1583](https://github.com/kubernetes/ingress-nginx/pull/1583) Add note for certificate authentication in Cloudflare +- [x] [#1617](https://github.com/kubernetes/ingress-nginx/pull/1617) fix typo in user-guide/annotations.md ### 0.9-beta.15 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15` -*New Features:* +_New Features:_ - Add OCSP support - Configurable ssl_verify_client -*Changes:* +_Changes:_ -- [X] [#1468](https://github.com/kubernetes/ingress/pull/1468) Add the original URL to the auth request -- [X] [#1469](https://github.com/kubernetes/ingress/pull/1469) Typo: Add missing {{ }} -- [X] [#1472](https://github.com/kubernetes/ingress/pull/1472) Fix X-Auth-Request-Redirect value to reflect the request uri -- [X] [#1473](https://github.com/kubernetes/ingress/pull/1473) Fix proxy protocol check -- [X] [#1475](https://github.com/kubernetes/ingress/pull/1475) Add OCSP support -- [X] [#1477](https://github.com/kubernetes/ingress/pull/1477) Fix semicolons in global configuration -- [X] 
[#1478](https://github.com/kubernetes/ingress/pull/1478) Pass redirect field in login page to get a proper redirect -- [X] [#1480](https://github.com/kubernetes/ingress/pull/1480) configurable ssl_verify_client -- [X] [#1485](https://github.com/kubernetes/ingress/pull/1485) Fix source IP address -- [X] [#1486](https://github.com/kubernetes/ingress/pull/1486) Fix overwrite of custom configuration +- [x] [#1468](https://github.com/kubernetes/ingress/pull/1468) Add the original URL to the auth request +- [x] [#1469](https://github.com/kubernetes/ingress/pull/1469) Typo: Add missing {{ }} +- [x] [#1472](https://github.com/kubernetes/ingress/pull/1472) Fix X-Auth-Request-Redirect value to reflect the request uri +- [x] [#1473](https://github.com/kubernetes/ingress/pull/1473) Fix proxy protocol check +- [x] [#1475](https://github.com/kubernetes/ingress/pull/1475) Add OCSP support +- [x] [#1477](https://github.com/kubernetes/ingress/pull/1477) Fix semicolons in global configuration +- [x] [#1478](https://github.com/kubernetes/ingress/pull/1478) Pass redirect field in login page to get a proper redirect +- [x] [#1480](https://github.com/kubernetes/ingress/pull/1480) configurable ssl_verify_client +- [x] [#1485](https://github.com/kubernetes/ingress/pull/1485) Fix source IP address +- [x] [#1486](https://github.com/kubernetes/ingress/pull/1486) Fix overwrite of custom configuration -*Documentation:* +_Documentation:_ -- [X] [#1460](https://github.com/kubernetes/ingress/pull/1460) Expose UDP port in UDP ingress example -- [X] [#1465](https://github.com/kubernetes/ingress/pull/1465) review prometheus docs +- [x] [#1460](https://github.com/kubernetes/ingress/pull/1460) Expose UDP port in UDP ingress example +- [x] [#1465](https://github.com/kubernetes/ingress/pull/1465) review prometheus docs ### 0.9-beta.14 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.14` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.14` -*New Features:* +_New Features:_ - Opentracing support for NGINX - Setting upstream vhost for nginx - Allow custom global configuration at multiple levels - Add support for proxy protocol decoding and encoding in TCP services -*Changes:* - -- [X] [#719](https://github.com/kubernetes/ingress/pull/719) Setting upstream vhost for nginx. -- [X] [#1321](https://github.com/kubernetes/ingress/pull/1321) Enable keepalive in upstreams -- [X] [#1322](https://github.com/kubernetes/ingress/pull/1322) parse real ip -- [X] [#1323](https://github.com/kubernetes/ingress/pull/1323) use $the_real_ip for rate limit whitelist -- [X] [#1326](https://github.com/kubernetes/ingress/pull/1326) Pass headers from the custom error backend -- [X] [#1328](https://github.com/kubernetes/ingress/pull/1328) update deprecated interface -- [X] [#1329](https://github.com/kubernetes/ingress/pull/1329) add example for nginx-ingress -- [X] [#1330](https://github.com/kubernetes/ingress/pull/1330) Increase coverage in template.go for nginx controller -- [X] [#1335](https://github.com/kubernetes/ingress/pull/1335) Configurable proxy_request_buffering per location.. 
-- [X] [#1338](https://github.com/kubernetes/ingress/pull/1338) Fix multiple leader election -- [X] [#1339](https://github.com/kubernetes/ingress/pull/1339) Enable status port listening in all interfaces -- [X] [#1340](https://github.com/kubernetes/ingress/pull/1340) Update sha256sum of nginx substitutions -- [X] [#1341](https://github.com/kubernetes/ingress/pull/1341) Fix typos -- [X] [#1345](https://github.com/kubernetes/ingress/pull/1345) refactor controllers.go -- [X] [#1349](https://github.com/kubernetes/ingress/pull/1349) Force reload if a secret is updated -- [X] [#1363](https://github.com/kubernetes/ingress/pull/1363) Fix proxy request buffering default configuration -- [X] [#1365](https://github.com/kubernetes/ingress/pull/1365) Fix equals comparsion returing False if both objects have nil Targets or Services. -- [X] [#1367](https://github.com/kubernetes/ingress/pull/1367) Fix typos -- [X] [#1379](https://github.com/kubernetes/ingress/pull/1379) Fix catch all upstream server -- [X] [#1380](https://github.com/kubernetes/ingress/pull/1380) Cleanup -- [X] [#1381](https://github.com/kubernetes/ingress/pull/1381) Refactor X-Forwarded-* headers -- [X] [#1382](https://github.com/kubernetes/ingress/pull/1382) Cleanup -- [X] [#1387](https://github.com/kubernetes/ingress/pull/1387) Improve resource usage in nginx controller -- [X] [#1392](https://github.com/kubernetes/ingress/pull/1392) Avoid issues with goroutines updating fields -- [X] [#1393](https://github.com/kubernetes/ingress/pull/1393) Limit the number of goroutines used for the update of ingress status -- [X] [#1394](https://github.com/kubernetes/ingress/pull/1394) Improve equals -- [X] [#1402](https://github.com/kubernetes/ingress/pull/1402) fix error when cert or key is nil -- [X] [#1403](https://github.com/kubernetes/ingress/pull/1403) Added tls ports to rbac nginx ingress controller and service -- [X] [#1404](https://github.com/kubernetes/ingress/pull/1404) Use nginx default value for SSLECDHCurve -- [X] [#1411](https://github.com/kubernetes/ingress/pull/1411) Add more descriptive logging in certificate loading -- [X] [#1412](https://github.com/kubernetes/ingress/pull/1412) Correct Error Handling to avoid panics and add more logging to template -- [X] [#1413](https://github.com/kubernetes/ingress/pull/1413) Validate external names -- [X] [#1418](https://github.com/kubernetes/ingress/pull/1418) Fix links after design proposals move -- [X] [#1419](https://github.com/kubernetes/ingress/pull/1419) Remove duplicated ingress check code -- [X] [#1420](https://github.com/kubernetes/ingress/pull/1420) Process queue items by time window -- [X] [#1423](https://github.com/kubernetes/ingress/pull/1423) Fix cast error -- [X] [#1424](https://github.com/kubernetes/ingress/pull/1424) Allow overriding the tag and registry -- [X] [#1426](https://github.com/kubernetes/ingress/pull/1426) Enhance Certificate Logging and Clearup Mutual Auth Docs -- [X] [#1430](https://github.com/kubernetes/ingress/pull/1430) Add support for proxy protocol decoding and encoding in TCP services -- [X] [#1434](https://github.com/kubernetes/ingress/pull/1434) Fix exec of readSecrets -- [X] [#1435](https://github.com/kubernetes/ingress/pull/1435) Add header to upstream server for external authentication -- [X] 
[#1438](https://github.com/kubernetes/ingress/pull/1438) Do not intercept errors from the custom error service -- [X] [#1439](https://github.com/kubernetes/ingress/pull/1439) Nginx master process killed thus no futher reloads -- [X] [#1440](https://github.com/kubernetes/ingress/pull/1440) Kill worker processes to allow the restart of nginx -- [X] [#1445](https://github.com/kubernetes/ingress/pull/1445) Updated godeps -- [X] [#1450](https://github.com/kubernetes/ingress/pull/1450) Fix links -- [X] [#1451](https://github.com/kubernetes/ingress/pull/1451) Add example of server-snippet -- [X] [#1452](https://github.com/kubernetes/ingress/pull/1452) Fix sync of secrets (kube lego) -- [X] [#1454](https://github.com/kubernetes/ingress/pull/1454) Allow custom global configuration at multiple levels - -*Documentation:* - -- [X] [#1400](https://github.com/kubernetes/ingress/pull/1400) Fix ConfigMap link in doc -- [X] [#1422](https://github.com/kubernetes/ingress/pull/1422) Add docs for opentracing -- [X] [#1441](https://github.com/kubernetes/ingress/pull/1441) Improve custom error pages doc -- [X] [#1442](https://github.com/kubernetes/ingress/pull/1442) Opentracing docs -- [X] [#1446](https://github.com/kubernetes/ingress/pull/1446) Add custom timeout annotations doc - +_Changes:_ + +- [x] [#719](https://github.com/kubernetes/ingress/pull/719) Setting upstream vhost for nginx. +- [x] [#1321](https://github.com/kubernetes/ingress/pull/1321) Enable keepalive in upstreams +- [x] [#1322](https://github.com/kubernetes/ingress/pull/1322) parse real ip +- [x] [#1323](https://github.com/kubernetes/ingress/pull/1323) use $the_real_ip for rate limit whitelist +- [x] [#1326](https://github.com/kubernetes/ingress/pull/1326) Pass headers from the custom error backend +- [x] [#1328](https://github.com/kubernetes/ingress/pull/1328) update deprecated interface +- [x] [#1329](https://github.com/kubernetes/ingress/pull/1329) add example for nginx-ingress +- [x] [#1330](https://github.com/kubernetes/ingress/pull/1330) Increase coverage in template.go for nginx controller +- [x] [#1335](https://github.com/kubernetes/ingress/pull/1335) Configurable proxy_request_buffering per location.. +- [x] [#1338](https://github.com/kubernetes/ingress/pull/1338) Fix multiple leader election +- [x] [#1339](https://github.com/kubernetes/ingress/pull/1339) Enable status port listening in all interfaces +- [x] [#1340](https://github.com/kubernetes/ingress/pull/1340) Update sha256sum of nginx substitutions +- [x] [#1341](https://github.com/kubernetes/ingress/pull/1341) Fix typos +- [x] [#1345](https://github.com/kubernetes/ingress/pull/1345) refactor controllers.go +- [x] [#1349](https://github.com/kubernetes/ingress/pull/1349) Force reload if a secret is updated +- [x] [#1363](https://github.com/kubernetes/ingress/pull/1363) Fix proxy request buffering default configuration +- [x] [#1365](https://github.com/kubernetes/ingress/pull/1365) Fix equals comparison returning False if both objects have nil Targets or Services.
+- [x] [#1367](https://github.com/kubernetes/ingress/pull/1367) Fix typos +- [x] [#1379](https://github.com/kubernetes/ingress/pull/1379) Fix catch all upstream server +- [x] [#1380](https://github.com/kubernetes/ingress/pull/1380) Cleanup +- [x] [#1381](https://github.com/kubernetes/ingress/pull/1381) Refactor X-Forwarded-\* headers +- [x] [#1382](https://github.com/kubernetes/ingress/pull/1382) Cleanup +- [x] [#1387](https://github.com/kubernetes/ingress/pull/1387) Improve resource usage in nginx controller +- [x] [#1392](https://github.com/kubernetes/ingress/pull/1392) Avoid issues with goroutines updating fields +- [x] [#1393](https://github.com/kubernetes/ingress/pull/1393) Limit the number of goroutines used for the update of ingress status +- [x] [#1394](https://github.com/kubernetes/ingress/pull/1394) Improve equals +- [x] [#1402](https://github.com/kubernetes/ingress/pull/1402) fix error when cert or key is nil +- [x] [#1403](https://github.com/kubernetes/ingress/pull/1403) Added tls ports to rbac nginx ingress controller and service +- [x] [#1404](https://github.com/kubernetes/ingress/pull/1404) Use nginx default value for SSLECDHCurve +- [x] [#1411](https://github.com/kubernetes/ingress/pull/1411) Add more descriptive logging in certificate loading +- [x] [#1412](https://github.com/kubernetes/ingress/pull/1412) Correct Error Handling to avoid panics and add more logging to template +- [x] [#1413](https://github.com/kubernetes/ingress/pull/1413) Validate external names +- [x] [#1418](https://github.com/kubernetes/ingress/pull/1418) Fix links after design proposals move +- [x] [#1419](https://github.com/kubernetes/ingress/pull/1419) Remove duplicated ingress check code +- [x] [#1420](https://github.com/kubernetes/ingress/pull/1420) Process queue items by time window +- [x] [#1423](https://github.com/kubernetes/ingress/pull/1423) Fix cast error +- [x] [#1424](https://github.com/kubernetes/ingress/pull/1424) Allow overriding the tag and registry +- [x] [#1426](https://github.com/kubernetes/ingress/pull/1426) Enhance Certificate Logging and Clearup Mutual Auth Docs +- [x] [#1430](https://github.com/kubernetes/ingress/pull/1430) Add support for proxy protocol decoding and encoding in TCP services +- [x] [#1434](https://github.com/kubernetes/ingress/pull/1434) Fix exec of readSecrets +- [x] [#1435](https://github.com/kubernetes/ingress/pull/1435) Add header to upstream server for external authentication +- [x] [#1438](https://github.com/kubernetes/ingress/pull/1438) Do not intercept errors from the custom error service +- [x] [#1439](https://github.com/kubernetes/ingress/pull/1439) Nginx master process killed thus no further reloads +- [x] [#1440](https://github.com/kubernetes/ingress/pull/1440) Kill worker processes to allow the restart of nginx +- [x] [#1445](https://github.com/kubernetes/ingress/pull/1445) Updated godeps +- [x] [#1450](https://github.com/kubernetes/ingress/pull/1450) Fix links +- [x] [#1451](https://github.com/kubernetes/ingress/pull/1451) Add example of server-snippet +- [x] [#1452](https://github.com/kubernetes/ingress/pull/1452) Fix sync of secrets (kube lego) +- [x] [#1454](https://github.com/kubernetes/ingress/pull/1454) Allow custom global configuration at multiple levels + +_Documentation:_ + +- [x] 
[#1400](https://github.com/kubernetes/ingress/pull/1400) Fix ConfigMap link in doc +- [x] [#1422](https://github.com/kubernetes/ingress/pull/1422) Add docs for opentracing +- [x] [#1441](https://github.com/kubernetes/ingress/pull/1441) Improve custom error pages doc +- [x] [#1442](https://github.com/kubernetes/ingress/pull/1442) Opentracing docs +- [x] [#1446](https://github.com/kubernetes/ingress/pull/1446) Add custom timeout annotations doc ### 0.9-beta.13 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13` -*New Features:* +_New Features:_ - NGINX 1.13.5 - New flag to disable node listing - Custom X-Forwarder-Header (CloudFlare uses `CF-Connecting-IP` as header) - Custom error page in Client Certificate Authentication -*Changes:* - -- [X] [#1272](https://github.com/kubernetes/ingress/pull/1272) Delete useless statement -- [X] [#1277](https://github.com/kubernetes/ingress/pull/1277) Add indent for nginx.conf -- [X] [#1278](https://github.com/kubernetes/ingress/pull/1278) Add proxy-pass-params annotation and Backend field -- [X] [#1282](https://github.com/kubernetes/ingress/pull/1282) Fix nginx stats -- [X] [#1288](https://github.com/kubernetes/ingress/pull/1288) Allow PATCH in enable-cors -- [X] [#1290](https://github.com/kubernetes/ingress/pull/1290) Add flag to disabling node listing -- [X] [#1293](https://github.com/kubernetes/ingress/pull/1293) Adds support for error page in Client Certificate Authentication -- [X] [#1308](https://github.com/kubernetes/ingress/pull/1308) A trivial typo in config -- [X] [#1310](https://github.com/kubernetes/ingress/pull/1310) Refactoring nginx configuration configmap -- [X] [#1311](https://github.com/kubernetes/ingress/pull/1311) Enable nginx async writes -- [X] [#1312](https://github.com/kubernetes/ingress/pull/1312) Allow custom forwarded for header -- [X] [#1313](https://github.com/kubernetes/ingress/pull/1313) Fix eol in nginx template -- [X] [#1315](https://github.com/kubernetes/ingress/pull/1315) Fix nginx custom error pages - - -*Documentation:* - -- [X] [#1270](https://github.com/kubernetes/ingress/pull/1270) add missing yamls in controllers/nginx -- [X] [#1276](https://github.com/kubernetes/ingress/pull/1276) Link rbac sample from deployment docs -- [X] [#1291](https://github.com/kubernetes/ingress/pull/1291) fix link to conformance suite -- [X] [#1295](https://github.com/kubernetes/ingress/pull/1295) fix README of nginx-ingress-controller -- [X] [#1299](https://github.com/kubernetes/ingress/pull/1299) fix two doc issues in nginx/README -- [X] [#1306](https://github.com/kubernetes/ingress/pull/1306) Fix kubeconfig example for nginx deployment - +_Changes:_ + +- [x] [#1272](https://github.com/kubernetes/ingress/pull/1272) Delete useless statement +- [x] [#1277](https://github.com/kubernetes/ingress/pull/1277) Add indent for nginx.conf +- [x] [#1278](https://github.com/kubernetes/ingress/pull/1278) Add proxy-pass-params annotation and Backend field +- [x] [#1282](https://github.com/kubernetes/ingress/pull/1282) Fix nginx stats +- [x] [#1288](https://github.com/kubernetes/ingress/pull/1288) Allow PATCH in enable-cors +- [x] [#1290](https://github.com/kubernetes/ingress/pull/1290) Add flag to disabling node
listing +- [x] [#1293](https://github.com/kubernetes/ingress/pull/1293) Adds support for error page in Client Certificate Authentication +- [x] [#1308](https://github.com/kubernetes/ingress/pull/1308) A trivial typo in config +- [x] [#1310](https://github.com/kubernetes/ingress/pull/1310) Refactoring nginx configuration configmap +- [x] [#1311](https://github.com/kubernetes/ingress/pull/1311) Enable nginx async writes +- [x] [#1312](https://github.com/kubernetes/ingress/pull/1312) Allow custom forwarded for header +- [x] [#1313](https://github.com/kubernetes/ingress/pull/1313) Fix eol in nginx template +- [x] [#1315](https://github.com/kubernetes/ingress/pull/1315) Fix nginx custom error pages + +_Documentation:_ + +- [x] [#1270](https://github.com/kubernetes/ingress/pull/1270) add missing yamls in controllers/nginx +- [x] [#1276](https://github.com/kubernetes/ingress/pull/1276) Link rbac sample from deployment docs +- [x] [#1291](https://github.com/kubernetes/ingress/pull/1291) fix link to conformance suite +- [x] [#1295](https://github.com/kubernetes/ingress/pull/1295) fix README of nginx-ingress-controller +- [x] [#1299](https://github.com/kubernetes/ingress/pull/1299) fix two doc issues in nginx/README +- [x] [#1306](https://github.com/kubernetes/ingress/pull/1306) Fix kubeconfig example for nginx deployment ### 0.9-beta.12 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.12` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.12` -*Breaking changes:* +_Breaking changes:_ - SSL passthrough is disabled by default. To enable the feature use `--enable-ssl-passthrough` -*New Features:* +_New Features:_ - Support for arm64 - New flags to customize listen ports @@ -336,540 +1595,529 @@ - Custom default backend (per Ingress) - Graceful shutdown for NGINX -*Changes:* - -- [X] [#977](https://github.com/kubernetes/ingress/pull/977) Add sort-backends command line option -- [X] [#981](https://github.com/kubernetes/ingress/pull/981) Add annotation to allow use of service ClusterIP for NGINX upstream. 
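A minimal sketch of restoring SSL passthrough after this release, per the breaking change noted above (only the `--enable-ssl-passthrough` flag comes from the release notes; the surrounding container spec is illustrative):

```yaml
# Sketch: from 0.9-beta.12 on, SSL passthrough must be opted into explicitly.
containers:
  - name: nginx-ingress-controller
    image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.12
    args:
      - /nginx-ingress-controller
      - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
      - --enable-ssl-passthrough
```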
-- [X] [#991](https://github.com/kubernetes/ingress/pull/991) Remove secret sync loop -- [X] [#992](https://github.com/kubernetes/ingress/pull/992) Check errors generating pem files -- [X] [#993](https://github.com/kubernetes/ingress/pull/993) Fix the sed command to work on macOS -- [X] [#1013](https://github.com/kubernetes/ingress/pull/1013) The fields of vtsDate are unified in the form of plural -- [X] [#1025](https://github.com/kubernetes/ingress/pull/1025) Fix file watch -- [X] [#1027](https://github.com/kubernetes/ingress/pull/1027) Lint code -- [X] [#1031](https://github.com/kubernetes/ingress/pull/1031) Change missing secret name log level to V(3) -- [X] [#1032](https://github.com/kubernetes/ingress/pull/1032) Alternative syncSecret approach #1030 -- [X] [#1042](https://github.com/kubernetes/ingress/pull/1042) Add function to allow custom values in Ingress status -- [X] [#1043](https://github.com/kubernetes/ingress/pull/1043) Return reference to object providing Endpoint -- [X] [#1046](https://github.com/kubernetes/ingress/pull/1046) Add field FileSHA in BasicDigest struct -- [X] [#1058](https://github.com/kubernetes/ingress/pull/1058) add per minute rate limiting -- [X] [#1060](https://github.com/kubernetes/ingress/pull/1060) Update fsnotify dependency to fix arm64 issue -- [X] [#1065](https://github.com/kubernetes/ingress/pull/1065) Add more descriptive steps in Dev Documentation -- [X] [#1073](https://github.com/kubernetes/ingress/pull/1073) Release nginx-slim 0.22 -- [X] [#1074](https://github.com/kubernetes/ingress/pull/1074) Remove lua and use fastcgi to render errors -- [X] [#1075](https://github.com/kubernetes/ingress/pull/1075) (feat/ #374) support proxy timeout -- [X] [#1076](https://github.com/kubernetes/ingress/pull/1076) Add more ssl test cases -- [X] [#1078](https://github.com/kubernetes/ingress/pull/1078) fix the same udp port and tcp port, update nginx.conf error -- [X] [#1080](https://github.com/kubernetes/ingress/pull/1080) Disable platform s390x -- [X] [#1081](https://github.com/kubernetes/ingress/pull/1081) Spit Static check and Coverage in diff Stages of Travis CI -- [X] [#1082](https://github.com/kubernetes/ingress/pull/1082) Fix build tasks -- [X] [#1087](https://github.com/kubernetes/ingress/pull/1087) Release nginx-slim 0.23 -- [X] [#1088](https://github.com/kubernetes/ingress/pull/1088) Configure nginx worker timeout -- [X] [#1089](https://github.com/kubernetes/ingress/pull/1089) Update nginx to 1.13.4 -- [X] [#1098](https://github.com/kubernetes/ingress/pull/1098) Exposing the event recorder to allow other controllers to create events -- [X] [#1102](https://github.com/kubernetes/ingress/pull/1102) Fix lose SSL Passthrough -- [X] [#1104](https://github.com/kubernetes/ingress/pull/1104) Simplify verification of hostname in ssl certificates -- [X] [#1109](https://github.com/kubernetes/ingress/pull/1109) Cleanup remote address in nginx template -- [X] [#1110](https://github.com/kubernetes/ingress/pull/1110) Fix Endpoint comparison -- [X] [#1118](https://github.com/kubernetes/ingress/pull/1118) feat(#733)Support nginx bandwidth control -- [X] [#1124](https://github.com/kubernetes/ingress/pull/1124) check fields len in dns.go -- [X] [#1130](https://github.com/kubernetes/ingress/pull/1130) Update 
nginx.go -- [X] [#1134](https://github.com/kubernetes/ingress/pull/1134) replace deprecated interface with versioned ones -- [X] [#1136](https://github.com/kubernetes/ingress/pull/1136) Fix status update - changed in #1074 -- [X] [#1138](https://github.com/kubernetes/ingress/pull/1138) update nginx.go: preformance improve -- [X] [#1139](https://github.com/kubernetes/ingress/pull/1139) Fix Todo:convert sequence to table -- [X] [#1162](https://github.com/kubernetes/ingress/pull/1162) Optimize CI build time -- [X] [#1164](https://github.com/kubernetes/ingress/pull/1164) Use variable request_uri as redirect after auth -- [X] [#1179](https://github.com/kubernetes/ingress/pull/1179) Fix sticky upstream not used when enable rewrite -- [X] [#1184](https://github.com/kubernetes/ingress/pull/1184) Add support for temporal and permanent redirects -- [X] [#1185](https://github.com/kubernetes/ingress/pull/1185) Add more info about Server-Alias usage -- [X] [#1186](https://github.com/kubernetes/ingress/pull/1186) Add annotation for client-body-buffer-size per location -- [X] [#1190](https://github.com/kubernetes/ingress/pull/1190) Add flag to disable SSL passthrough -- [X] [#1193](https://github.com/kubernetes/ingress/pull/1193) fix broken link -- [X] [#1198](https://github.com/kubernetes/ingress/pull/1198) Add option for specific scheme for base url -- [X] [#1202](https://github.com/kubernetes/ingress/pull/1202) formatIP issue -- [X] [#1203](https://github.com/kubernetes/ingress/pull/1203) NGINX not reloading correctly -- [X] [#1204](https://github.com/kubernetes/ingress/pull/1204) Fix template error -- [X] [#1205](https://github.com/kubernetes/ingress/pull/1205) Add initial sync of secrets -- [X] [#1206](https://github.com/kubernetes/ingress/pull/1206) Update ssl-passthrough docs -- [X] [#1207](https://github.com/kubernetes/ingress/pull/1207) delete broken link -- [X] [#1208](https://github.com/kubernetes/ingress/pull/1208) fix some typo -- [X] [#1210](https://github.com/kubernetes/ingress/pull/1210) add rate limit whitelist -- [X] [#1215](https://github.com/kubernetes/ingress/pull/1215) Replace base64 encoding with random uuid -- [X] [#1218](https://github.com/kubernetes/ingress/pull/1218) Trivial fixes in core/pkg/net -- [X] [#1219](https://github.com/kubernetes/ingress/pull/1219) keep zones unique per ingress resource -- [X] [#1221](https://github.com/kubernetes/ingress/pull/1221) Move certificate authentication from location to server -- [X] [#1223](https://github.com/kubernetes/ingress/pull/1223) Add doc for non-www to www annotation -- [X] [#1224](https://github.com/kubernetes/ingress/pull/1224) refactor rate limit whitelist -- [X] [#1226](https://github.com/kubernetes/ingress/pull/1226) Remove useless variable in nginx.tmpl -- [X] [#1227](https://github.com/kubernetes/ingress/pull/1227) Update annotations doc with base-url-scheme -- [X] [#1233](https://github.com/kubernetes/ingress/pull/1233) Fix ClientBodyBufferSize annotation -- [X] [#1234](https://github.com/kubernetes/ingress/pull/1234) Lint code -- [X] [#1235](https://github.com/kubernetes/ingress/pull/1235) Fix Equal comparison -- [X] [#1236](https://github.com/kubernetes/ingress/pull/1236) Add Validation for Client Body Buffer Size -- [X] 
[#1238](https://github.com/kubernetes/ingress/pull/1238) Add support for 'client_body_timeout' and 'client_header_timeout' -- [X] [#1239](https://github.com/kubernetes/ingress/pull/1239) Add flags to customize listen ports and detect port collisions -- [X] [#1243](https://github.com/kubernetes/ingress/pull/1243) Add support for access-log-path and error-log-path -- [X] [#1244](https://github.com/kubernetes/ingress/pull/1244) Add custom default backend annotation -- [X] [#1246](https://github.com/kubernetes/ingress/pull/1246) Add additional headers when custom default backend is used -- [X] [#1247](https://github.com/kubernetes/ingress/pull/1247) Make Ingress annotations available in template -- [X] [#1248](https://github.com/kubernetes/ingress/pull/1248) Improve nginx controller performance -- [X] [#1254](https://github.com/kubernetes/ingress/pull/1254) fix Type transform panic -- [X] [#1257](https://github.com/kubernetes/ingress/pull/1257) Graceful shutdown for Nginx -- [X] [#1261](https://github.com/kubernetes/ingress/pull/1261) Add support for 'worker-shutdown-timeout' - - -*Documentation:* - -- [X] [#976](https://github.com/kubernetes/ingress/pull/976) Update annotations doc -- [X] [#979](https://github.com/kubernetes/ingress/pull/979) Missing auth example -- [X] [#980](https://github.com/kubernetes/ingress/pull/980) Add nginx basic auth example -- [X] [#1001](https://github.com/kubernetes/ingress/pull/1001) examples/nginx/rbac: Give access to own namespace -- [X] [#1005](https://github.com/kubernetes/ingress/pull/1005) Update configuration.md -- [X] [#1018](https://github.com/kubernetes/ingress/pull/1018) add docs for `proxy-set-headers` and `add-headers` -- [X] [#1038](https://github.com/kubernetes/ingress/pull/1038) typo / spelling in README.md -- [X] [#1039](https://github.com/kubernetes/ingress/pull/1039) typo in examples/tcp/nginx/README.md -- [X] [#1049](https://github.com/kubernetes/ingress/pull/1049) Fix config name in the example. 
-- [X] [#1054](https://github.com/kubernetes/ingress/pull/1054) Fix link to UDP example -- [X] [#1084](https://github.com/kubernetes/ingress/pull/1084) (issue #310)Fix some broken link -- [X] [#1103](https://github.com/kubernetes/ingress/pull/1103) Add GoDoc Widget -- [X] [#1105](https://github.com/kubernetes/ingress/pull/1105) Make Readme file more readable -- [X] [#1106](https://github.com/kubernetes/ingress/pull/1106) Update annotations.md -- [X] [#1107](https://github.com/kubernetes/ingress/pull/1107) Fix Broken Link -- [X] [#1119](https://github.com/kubernetes/ingress/pull/1119) fix typos in controllers/nginx/README.md -- [X] [#1122](https://github.com/kubernetes/ingress/pull/1122) Fix broken link -- [X] [#1131](https://github.com/kubernetes/ingress/pull/1131) Add short help doc in configuration for nginx limit rate -- [X] [#1143](https://github.com/kubernetes/ingress/pull/1143) Minor Typo Fix -- [X] [#1144](https://github.com/kubernetes/ingress/pull/1144) Minor Typo fix -- [X] [#1145](https://github.com/kubernetes/ingress/pull/1145) Minor Typo fix -- [X] [#1146](https://github.com/kubernetes/ingress/pull/1146) Fix Minor Typo in Readme -- [X] [#1147](https://github.com/kubernetes/ingress/pull/1147) Minor Typo Fix -- [X] [#1148](https://github.com/kubernetes/ingress/pull/1148) Minor Typo Fix in Getting-Started.md -- [X] [#1149](https://github.com/kubernetes/ingress/pull/1149) Fix Minor Typo in TLS authentication -- [X] [#1150](https://github.com/kubernetes/ingress/pull/1150) Fix Minor Typo in Customize the HAProxy configuration -- [X] [#1151](https://github.com/kubernetes/ingress/pull/1151) Fix Minor Typo in customization custom-template -- [X] [#1152](https://github.com/kubernetes/ingress/pull/1152) Fix minor typo in HAProxy Multi TLS certificate termination -- [X] [#1153](https://github.com/kubernetes/ingress/pull/1153) Fix minor typo in Multi TLS certificate termination -- [X] [#1154](https://github.com/kubernetes/ingress/pull/1154) Fix minor typo in Role Based Access Control -- [X] [#1155](https://github.com/kubernetes/ingress/pull/1155) Fix minor typo in TCP loadbalancing -- [X] [#1156](https://github.com/kubernetes/ingress/pull/1156) Fix minor typo in UDP loadbalancing -- [X] [#1157](https://github.com/kubernetes/ingress/pull/1157) Fix minor typos in Prerequisites -- [X] [#1158](https://github.com/kubernetes/ingress/pull/1158) Fix minor typo in Ingress examples -- [X] [#1159](https://github.com/kubernetes/ingress/pull/1159) Fix minor typos in Ingress admin guide -- [X] [#1160](https://github.com/kubernetes/ingress/pull/1160) Fix a broken href and typo in Ingress FAQ -- [X] [#1165](https://github.com/kubernetes/ingress/pull/1165) Update CONTRIBUTING.md -- [X] [#1168](https://github.com/kubernetes/ingress/pull/1168) finx link to running-locally.md -- [X] [#1170](https://github.com/kubernetes/ingress/pull/1170) Update dead link in nginx/HTTPS section -- [X] [#1172](https://github.com/kubernetes/ingress/pull/1172) Update README.md -- [X] [#1173](https://github.com/kubernetes/ingress/pull/1173) Update admin.md -- [X] [#1174](https://github.com/kubernetes/ingress/pull/1174) fix several titles -- [X] [#1177](https://github.com/kubernetes/ingress/pull/1177) fix typos -- [X] 
[#1188](https://github.com/kubernetes/ingress/pull/1188) Fix minor typo -- [X] [#1189](https://github.com/kubernetes/ingress/pull/1189) Fix sign in URL redirect parameter -- [X] [#1192](https://github.com/kubernetes/ingress/pull/1192) Update README.md -- [X] [#1195](https://github.com/kubernetes/ingress/pull/1195) Update troubleshooting.md -- [X] [#1196](https://github.com/kubernetes/ingress/pull/1196) Update README.md -- [X] [#1209](https://github.com/kubernetes/ingress/pull/1209) Update README.md -- [X] [#1085](https://github.com/kubernetes/ingress/pull/1085) Fix ConfigMap's namespace in custom configuration example for nginx -- [X] [#1142](https://github.com/kubernetes/ingress/pull/1142) Fix typo in multiple docs -- [X] [#1228](https://github.com/kubernetes/ingress/pull/1228) Update release doc in getting-started.md -- [X] [#1230](https://github.com/kubernetes/ingress/pull/1230) Update godep guide link - +_Changes:_ + +- [x] [#977](https://github.com/kubernetes/ingress/pull/977) Add sort-backends command line option +- [x] [#981](https://github.com/kubernetes/ingress/pull/981) Add annotation to allow use of service ClusterIP for NGINX upstream. +- [x] [#991](https://github.com/kubernetes/ingress/pull/991) Remove secret sync loop +- [x] [#992](https://github.com/kubernetes/ingress/pull/992) Check errors generating pem files +- [x] [#993](https://github.com/kubernetes/ingress/pull/993) Fix the sed command to work on macOS +- [x] [#1013](https://github.com/kubernetes/ingress/pull/1013) The fields of vtsDate are unified in the form of plural +- [x] [#1025](https://github.com/kubernetes/ingress/pull/1025) Fix file watch +- [x] [#1027](https://github.com/kubernetes/ingress/pull/1027) Lint code +- [x] [#1031](https://github.com/kubernetes/ingress/pull/1031) Change missing secret name log level to V(3) +- [x] [#1032](https://github.com/kubernetes/ingress/pull/1032) Alternative syncSecret approach #1030 +- [x] [#1042](https://github.com/kubernetes/ingress/pull/1042) Add function to allow custom values in Ingress status +- [x] [#1043](https://github.com/kubernetes/ingress/pull/1043) Return reference to object providing Endpoint +- [x] [#1046](https://github.com/kubernetes/ingress/pull/1046) Add field FileSHA in BasicDigest struct +- [x] [#1058](https://github.com/kubernetes/ingress/pull/1058) add per minute rate limiting +- [x] [#1060](https://github.com/kubernetes/ingress/pull/1060) Update fsnotify dependency to fix arm64 issue +- [x] [#1065](https://github.com/kubernetes/ingress/pull/1065) Add more descriptive steps in Dev Documentation +- [x] [#1073](https://github.com/kubernetes/ingress/pull/1073) Release nginx-slim 0.22 +- [x] [#1074](https://github.com/kubernetes/ingress/pull/1074) Remove lua and use fastcgi to render errors +- [x] [#1075](https://github.com/kubernetes/ingress/pull/1075) (feat/ #374) support proxy timeout +- [x] [#1076](https://github.com/kubernetes/ingress/pull/1076) Add more ssl test cases +- [x] [#1078](https://github.com/kubernetes/ingress/pull/1078) fix the same udp port and tcp port, update nginx.conf error +- [x] [#1080](https://github.com/kubernetes/ingress/pull/1080) Disable platform s390x +- [x] [#1081](https://github.com/kubernetes/ingress/pull/1081) Split Static check and Coverage in diff Stages of
Travis CI +- [x] [#1082](https://github.com/kubernetes/ingress/pull/1082) Fix build tasks +- [x] [#1087](https://github.com/kubernetes/ingress/pull/1087) Release nginx-slim 0.23 +- [x] [#1088](https://github.com/kubernetes/ingress/pull/1088) Configure nginx worker timeout +- [x] [#1089](https://github.com/kubernetes/ingress/pull/1089) Update nginx to 1.13.4 +- [x] [#1098](https://github.com/kubernetes/ingress/pull/1098) Exposing the event recorder to allow other controllers to create events +- [x] [#1102](https://github.com/kubernetes/ingress/pull/1102) Fix lose SSL Passthrough +- [x] [#1104](https://github.com/kubernetes/ingress/pull/1104) Simplify verification of hostname in ssl certificates +- [x] [#1109](https://github.com/kubernetes/ingress/pull/1109) Cleanup remote address in nginx template +- [x] [#1110](https://github.com/kubernetes/ingress/pull/1110) Fix Endpoint comparison +- [x] [#1118](https://github.com/kubernetes/ingress/pull/1118) feat(#733) Support nginx bandwidth control +- [x] [#1124](https://github.com/kubernetes/ingress/pull/1124) check fields len in dns.go +- [x] [#1130](https://github.com/kubernetes/ingress/pull/1130) Update nginx.go +- [x] [#1134](https://github.com/kubernetes/ingress/pull/1134) replace deprecated interface with versioned ones +- [x] [#1136](https://github.com/kubernetes/ingress/pull/1136) Fix status update - changed in #1074 +- [x] [#1138](https://github.com/kubernetes/ingress/pull/1138) update nginx.go: performance improve +- [x] [#1139](https://github.com/kubernetes/ingress/pull/1139) Fix Todo: convert sequence to table +- [x] [#1162](https://github.com/kubernetes/ingress/pull/1162) Optimize CI build time +- [x] [#1164](https://github.com/kubernetes/ingress/pull/1164) Use variable request_uri as redirect after auth +- [x] [#1179](https://github.com/kubernetes/ingress/pull/1179) Fix sticky upstream not used when enable rewrite +- [x] [#1184](https://github.com/kubernetes/ingress/pull/1184) Add support for temporary and permanent redirects +- [x] [#1185](https://github.com/kubernetes/ingress/pull/1185) Add more info about Server-Alias usage +- [x] [#1186](https://github.com/kubernetes/ingress/pull/1186) Add annotation for client-body-buffer-size per location +- [x] [#1190](https://github.com/kubernetes/ingress/pull/1190) Add flag to disable SSL passthrough +- [x] [#1193](https://github.com/kubernetes/ingress/pull/1193) fix broken link +- [x] [#1198](https://github.com/kubernetes/ingress/pull/1198) Add option for specific scheme for base url +- [x] [#1202](https://github.com/kubernetes/ingress/pull/1202) formatIP issue +- [x] [#1203](https://github.com/kubernetes/ingress/pull/1203) NGINX not reloading correctly +- [x] [#1204](https://github.com/kubernetes/ingress/pull/1204) Fix template error +- [x] [#1205](https://github.com/kubernetes/ingress/pull/1205) Add initial sync of secrets +- [x] [#1206](https://github.com/kubernetes/ingress/pull/1206) Update ssl-passthrough docs +- [x] [#1207](https://github.com/kubernetes/ingress/pull/1207) delete broken link +- [x] [#1208](https://github.com/kubernetes/ingress/pull/1208) fix some typo +- [x] [#1210](https://github.com/kubernetes/ingress/pull/1210) add rate limit whitelist +- [x] [#1215](https://github.com/kubernetes/ingress/pull/1215)
Replace base64 encoding with random uuid +- [x] [#1218](https://github.com/kubernetes/ingress/pull/1218) Trivial fixes in core/pkg/net +- [x] [#1219](https://github.com/kubernetes/ingress/pull/1219) keep zones unique per ingress resource +- [x] [#1221](https://github.com/kubernetes/ingress/pull/1221) Move certificate authentication from location to server +- [x] [#1223](https://github.com/kubernetes/ingress/pull/1223) Add doc for non-www to www annotation +- [x] [#1224](https://github.com/kubernetes/ingress/pull/1224) refactor rate limit whitelist +- [x] [#1226](https://github.com/kubernetes/ingress/pull/1226) Remove useless variable in nginx.tmpl +- [x] [#1227](https://github.com/kubernetes/ingress/pull/1227) Update annotations doc with base-url-scheme +- [x] [#1233](https://github.com/kubernetes/ingress/pull/1233) Fix ClientBodyBufferSize annotation +- [x] [#1234](https://github.com/kubernetes/ingress/pull/1234) Lint code +- [x] [#1235](https://github.com/kubernetes/ingress/pull/1235) Fix Equal comparison +- [x] [#1236](https://github.com/kubernetes/ingress/pull/1236) Add Validation for Client Body Buffer Size +- [x] [#1238](https://github.com/kubernetes/ingress/pull/1238) Add support for 'client_body_timeout' and 'client_header_timeout' +- [x] [#1239](https://github.com/kubernetes/ingress/pull/1239) Add flags to customize listen ports and detect port collisions +- [x] [#1243](https://github.com/kubernetes/ingress/pull/1243) Add support for access-log-path and error-log-path +- [x] [#1244](https://github.com/kubernetes/ingress/pull/1244) Add custom default backend annotation +- [x] [#1246](https://github.com/kubernetes/ingress/pull/1246) Add additional headers when custom default backend is used +- [x] [#1247](https://github.com/kubernetes/ingress/pull/1247) Make Ingress annotations available in template +- [x] [#1248](https://github.com/kubernetes/ingress/pull/1248) Improve nginx controller performance +- [x] [#1254](https://github.com/kubernetes/ingress/pull/1254) fix Type transform panic +- [x] [#1257](https://github.com/kubernetes/ingress/pull/1257) Graceful shutdown for Nginx +- [x] [#1261](https://github.com/kubernetes/ingress/pull/1261) Add support for 'worker-shutdown-timeout' + +_Documentation:_ + +- [x] [#976](https://github.com/kubernetes/ingress/pull/976) Update annotations doc +- [x] [#979](https://github.com/kubernetes/ingress/pull/979) Missing auth example +- [x] [#980](https://github.com/kubernetes/ingress/pull/980) Add nginx basic auth example +- [x] [#1001](https://github.com/kubernetes/ingress/pull/1001) examples/nginx/rbac: Give access to own namespace +- [x] [#1005](https://github.com/kubernetes/ingress/pull/1005) Update configuration.md +- [x] [#1018](https://github.com/kubernetes/ingress/pull/1018) add docs for `proxy-set-headers` and `add-headers` +- [x] [#1038](https://github.com/kubernetes/ingress/pull/1038) typo / spelling in README.md +- [x] [#1039](https://github.com/kubernetes/ingress/pull/1039) typo in examples/tcp/nginx/README.md +- [x] [#1049](https://github.com/kubernetes/ingress/pull/1049) Fix config name in the example. 
+- [x] [#1054](https://github.com/kubernetes/ingress/pull/1054) Fix link to UDP example +- [x] [#1084](https://github.com/kubernetes/ingress/pull/1084) (issue #310) Fix some broken link +- [x] [#1103](https://github.com/kubernetes/ingress/pull/1103) Add GoDoc Widget +- [x] [#1105](https://github.com/kubernetes/ingress/pull/1105) Make Readme file more readable +- [x] [#1106](https://github.com/kubernetes/ingress/pull/1106) Update annotations.md +- [x] [#1107](https://github.com/kubernetes/ingress/pull/1107) Fix Broken Link +- [x] [#1119](https://github.com/kubernetes/ingress/pull/1119) fix typos in controllers/nginx/README.md +- [x] [#1122](https://github.com/kubernetes/ingress/pull/1122) Fix broken link +- [x] [#1131](https://github.com/kubernetes/ingress/pull/1131) Add short help doc in configuration for nginx limit rate +- [x] [#1143](https://github.com/kubernetes/ingress/pull/1143) Minor Typo Fix +- [x] [#1144](https://github.com/kubernetes/ingress/pull/1144) Minor Typo fix +- [x] [#1145](https://github.com/kubernetes/ingress/pull/1145) Minor Typo fix +- [x] [#1146](https://github.com/kubernetes/ingress/pull/1146) Fix Minor Typo in Readme +- [x] [#1147](https://github.com/kubernetes/ingress/pull/1147) Minor Typo Fix +- [x] [#1148](https://github.com/kubernetes/ingress/pull/1148) Minor Typo Fix in Getting-Started.md +- [x] [#1149](https://github.com/kubernetes/ingress/pull/1149) Fix Minor Typo in TLS authentication +- [x] [#1150](https://github.com/kubernetes/ingress/pull/1150) Fix Minor Typo in Customize the HAProxy configuration +- [x] [#1151](https://github.com/kubernetes/ingress/pull/1151) Fix Minor Typo in customization custom-template +- [x] [#1152](https://github.com/kubernetes/ingress/pull/1152) Fix minor typo in HAProxy Multi TLS certificate termination +- [x] [#1153](https://github.com/kubernetes/ingress/pull/1153) Fix minor typo in Multi TLS certificate termination +- [x] [#1154](https://github.com/kubernetes/ingress/pull/1154) Fix minor typo in Role Based Access Control +- [x] [#1155](https://github.com/kubernetes/ingress/pull/1155) Fix minor typo in TCP loadbalancing +- [x] [#1156](https://github.com/kubernetes/ingress/pull/1156) Fix minor typo in UDP loadbalancing +- [x] [#1157](https://github.com/kubernetes/ingress/pull/1157) Fix minor typos in Prerequisites +- [x] [#1158](https://github.com/kubernetes/ingress/pull/1158) Fix minor typo in Ingress examples +- [x] [#1159](https://github.com/kubernetes/ingress/pull/1159) Fix minor typos in Ingress admin guide +- [x] [#1160](https://github.com/kubernetes/ingress/pull/1160) Fix a broken href and typo in Ingress FAQ +- [x] [#1165](https://github.com/kubernetes/ingress/pull/1165) Update CONTRIBUTING.md +- [x] [#1168](https://github.com/kubernetes/ingress/pull/1168) fix link to running-locally.md +- [x] [#1170](https://github.com/kubernetes/ingress/pull/1170) Update dead link in nginx/HTTPS section +- [x] [#1172](https://github.com/kubernetes/ingress/pull/1172) Update README.md +- [x] [#1173](https://github.com/kubernetes/ingress/pull/1173) Update admin.md +- [x] [#1174](https://github.com/kubernetes/ingress/pull/1174) fix several titles +- [x] [#1177](https://github.com/kubernetes/ingress/pull/1177) fix typos +- [x]
[#1188](https://github.com/kubernetes/ingress/pull/1188) Fix minor typo +- [x] [#1189](https://github.com/kubernetes/ingress/pull/1189) Fix sign in URL redirect parameter +- [x] [#1192](https://github.com/kubernetes/ingress/pull/1192) Update README.md +- [x] [#1195](https://github.com/kubernetes/ingress/pull/1195) Update troubleshooting.md +- [x] [#1196](https://github.com/kubernetes/ingress/pull/1196) Update README.md +- [x] [#1209](https://github.com/kubernetes/ingress/pull/1209) Update README.md +- [x] [#1085](https://github.com/kubernetes/ingress/pull/1085) Fix ConfigMap's namespace in custom configuration example for nginx +- [x] [#1142](https://github.com/kubernetes/ingress/pull/1142) Fix typo in multiple docs +- [x] [#1228](https://github.com/kubernetes/ingress/pull/1228) Update release doc in getting-started.md +- [x] [#1230](https://github.com/kubernetes/ingress/pull/1230) Update godep guide link ### 0.9-beta.11 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11` Fixes NGINX [CVE-2017-7529](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-7529) -*Changes:* - -- [X] [#659](https://github.com/kubernetes/ingress/pull/659) [nginx] TCP configmap should allow listen proxy_protocol per service -- [X] [#730](https://github.com/kubernetes/ingress/pull/730) Add support for add_headers -- [X] [#808](https://github.com/kubernetes/ingress/pull/808) HTTP->HTTPS redirect does not work with use-proxy-protocol: "true" -- [X] [#921](https://github.com/kubernetes/ingress/pull/921) Make proxy-real-ip-cidr a comma separated list -- [X] [#930](https://github.com/kubernetes/ingress/pull/930) Add support for proxy protocol in TCP services -- [X] [#933](https://github.com/kubernetes/ingress/pull/933) Lint code -- [X] [#937](https://github.com/kubernetes/ingress/pull/937) Fix lint code errors -- [X] [#940](https://github.com/kubernetes/ingress/pull/940) Sets parameters for a shared memory zone of limit_conn_zone -- [X] [#949](https://github.com/kubernetes/ingress/pull/949) fix nginx version to 1.13.3 to fix integer overflow -- [X] [#956](https://github.com/kubernetes/ingress/pull/956) Simplify handling of ssl certificates -- [X] [#958](https://github.com/kubernetes/ingress/pull/958) Release ubuntu-slim:0.13 -- [X] [#959](https://github.com/kubernetes/ingress/pull/959) Release nginx-slim 0.21 -- [X] [#960](https://github.com/kubernetes/ingress/pull/960) Update nginx in ingress controller -- [X] [#964](https://github.com/kubernetes/ingress/pull/964) Support for proxy_headers_hash_bucket_size and proxy_headers_hash_max_size -- [X] [#966](https://github.com/kubernetes/ingress/pull/966) Fix error checking for pod name & NS -- [X] [#967](https://github.com/kubernetes/ingress/pull/967) Fix runningAddresses typo -- [X] [#968](https://github.com/kubernetes/ingress/pull/968) Fix missing hyphen in yaml for nginx RBAC example -- [X] [#973](https://github.com/kubernetes/ingress/pull/973) check number of servers in configuration comparator - +_Changes:_ + +- [x] [#659](https://github.com/kubernetes/ingress/pull/659) [nginx] TCP configmap should allow listen proxy_protocol per service +- [x] [#730](https://github.com/kubernetes/ingress/pull/730) Add support for add_headers +- 
[x] [#808](https://github.com/kubernetes/ingress/pull/808) HTTP->HTTPS redirect does not work with use-proxy-protocol: "true" +- [x] [#921](https://github.com/kubernetes/ingress/pull/921) Make proxy-real-ip-cidr a comma separated list +- [x] [#930](https://github.com/kubernetes/ingress/pull/930) Add support for proxy protocol in TCP services +- [x] [#933](https://github.com/kubernetes/ingress/pull/933) Lint code +- [x] [#937](https://github.com/kubernetes/ingress/pull/937) Fix lint code errors +- [x] [#940](https://github.com/kubernetes/ingress/pull/940) Sets parameters for a shared memory zone of limit_conn_zone +- [x] [#949](https://github.com/kubernetes/ingress/pull/949) fix nginx version to 1.13.3 to fix integer overflow +- [x] [#956](https://github.com/kubernetes/ingress/pull/956) Simplify handling of ssl certificates +- [x] [#958](https://github.com/kubernetes/ingress/pull/958) Release ubuntu-slim:0.13 +- [x] [#959](https://github.com/kubernetes/ingress/pull/959) Release nginx-slim 0.21 +- [x] [#960](https://github.com/kubernetes/ingress/pull/960) Update nginx in ingress controller +- [x] [#964](https://github.com/kubernetes/ingress/pull/964) Support for proxy_headers_hash_bucket_size and proxy_headers_hash_max_size +- [x] [#966](https://github.com/kubernetes/ingress/pull/966) Fix error checking for pod name & NS +- [x] [#967](https://github.com/kubernetes/ingress/pull/967) Fix runningAddresses typo +- [x] [#968](https://github.com/kubernetes/ingress/pull/968) Fix missing hyphen in yaml for nginx RBAC example +- [x] [#973](https://github.com/kubernetes/ingress/pull/973) check number of servers in configuration comparator ### 0.9-beta.10 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.10` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.10` Fix release 0.9-beta.9 ### 0.9-beta.9 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.9` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.9` -*New Features:* +_New Features:_ - Add support for arm and ppc64le - -*Changes:* - -- [X] [#548](https://github.com/kubernetes/ingress/pull/548) nginx: support multidomain certificates -- [X] [#620](https://github.com/kubernetes/ingress/pull/620) [nginx] Listening ports are not configurable, so ingress can't be run multiple times per node when using CNI -- [X] [#648](https://github.com/kubernetes/ingress/pull/648) publish-service argument isn't honored when ELB is internal only facing. 
-- [X] [#833](https://github.com/kubernetes/ingress/pull/833) WIP: Avoid reloads implementing Equals in structs -- [X] [#838](https://github.com/kubernetes/ingress/pull/838) Feature request: Add ingress annotation to enable upstream "keepalive" option -- [X] [#844](https://github.com/kubernetes/ingress/pull/844) ingress annotations affinity is not working -- [X] [#862](https://github.com/kubernetes/ingress/pull/862) Avoid reloads implementing Equaler interface -- [X] [#864](https://github.com/kubernetes/ingress/pull/864) Remove dead code -- [X] [#868](https://github.com/kubernetes/ingress/pull/868) Lint nginx code -- [X] [#871](https://github.com/kubernetes/ingress/pull/871) Add feature to allow sticky sessions per location -- [X] [#873](https://github.com/kubernetes/ingress/pull/873) Update README.md -- [X] [#876](https://github.com/kubernetes/ingress/pull/876) Add information about nginx controller flags -- [X] [#878](https://github.com/kubernetes/ingress/pull/878) Update go to 1.8.3 -- [X] [#881](https://github.com/kubernetes/ingress/pull/881) Option to not remove loadBalancer status record? -- [X] [#882](https://github.com/kubernetes/ingress/pull/882) Add flag to skip the update of Ingress status on shutdown -- [X] [#885](https://github.com/kubernetes/ingress/pull/885) Don't use $proxy_protocol var which may be undefined. -- [X] [#886](https://github.com/kubernetes/ingress/pull/886) Add support for SubjectAltName in SSL certificates -- [X] [#888](https://github.com/kubernetes/ingress/pull/888) Update nginx-slim to 0.19 -- [X] [#889](https://github.com/kubernetes/ingress/pull/889) Add PHOST to backend -- [X] [#890](https://github.com/kubernetes/ingress/pull/890) Improve variable configuration for source IP address -- [X] [#892](https://github.com/kubernetes/ingress/pull/892) Add upstream keepalive connections cache -- [X] [#897](https://github.com/kubernetes/ingress/pull/897) Update outdated ingress resource link -- [X] [#898](https://github.com/kubernetes/ingress/pull/898) add error check right when reload nginx fail -- [X] [#899](https://github.com/kubernetes/ingress/pull/899) Fix nginx error check -- [X] [#900](https://github.com/kubernetes/ingress/pull/900) After #862 changes in the configmap do not trigger a reload -- [X] [#901](https://github.com/kubernetes/ingress/pull/901) [doc] Update NGinX status port to 18080 -- [X] [#902](https://github.com/kubernetes/ingress/pull/902) Always reload after a change in the configuration -- [X] [#904](https://github.com/kubernetes/ingress/pull/904) Fix nginx sticky sessions -- [X] [#906](https://github.com/kubernetes/ingress/pull/906) Fix race condition with closed channels -- [X] [#907](https://github.com/kubernetes/ingress/pull/907) nginx/proxy: allow specifying next upstream behaviour -- [X] [#910](https://github.com/kubernetes/ingress/pull/910) Feature request: use `X-Forwarded-Host` from the reverse proxy before -- [X] [#911](https://github.com/kubernetes/ingress/pull/911) Improve X-Forwarded-Host support -- [X] [#915](https://github.com/kubernetes/ingress/pull/915) Release nginx-slim 0.20 -- [X] [#916](https://github.com/kubernetes/ingress/pull/916) Add arm and ppc64le support -- [X] [#919](https://github.com/kubernetes/ingress/pull/919) Apply the 'ssl-redirect' annotation per-location 
-- [X] [#922](https://github.com/kubernetes/ingress/pull/922) Add example of TLS termination using a classic ELB +_Changes:_ + +- [x] [#548](https://github.com/kubernetes/ingress/pull/548) nginx: support multidomain certificates +- [x] [#620](https://github.com/kubernetes/ingress/pull/620) [nginx] Listening ports are not configurable, so ingress can't be run multiple times per node when using CNI +- [x] [#648](https://github.com/kubernetes/ingress/pull/648) publish-service argument isn't honored when ELB is internal only facing. +- [x] [#833](https://github.com/kubernetes/ingress/pull/833) WIP: Avoid reloads implementing Equals in structs +- [x] [#838](https://github.com/kubernetes/ingress/pull/838) Feature request: Add ingress annotation to enable upstream "keepalive" option +- [x] [#844](https://github.com/kubernetes/ingress/pull/844) ingress annotations affinity is not working +- [x] [#862](https://github.com/kubernetes/ingress/pull/862) Avoid reloads implementing Equaler interface +- [x] [#864](https://github.com/kubernetes/ingress/pull/864) Remove dead code +- [x] [#868](https://github.com/kubernetes/ingress/pull/868) Lint nginx code +- [x] [#871](https://github.com/kubernetes/ingress/pull/871) Add feature to allow sticky sessions per location +- [x] [#873](https://github.com/kubernetes/ingress/pull/873) Update README.md +- [x] [#876](https://github.com/kubernetes/ingress/pull/876) Add information about nginx controller flags +- [x] [#878](https://github.com/kubernetes/ingress/pull/878) Update go to 1.8.3 +- [x] [#881](https://github.com/kubernetes/ingress/pull/881) Option to not remove loadBalancer status record? +- [x] [#882](https://github.com/kubernetes/ingress/pull/882) Add flag to skip the update of Ingress status on shutdown +- [x] [#885](https://github.com/kubernetes/ingress/pull/885) Don't use $proxy_protocol var which may be undefined. 
+- [x] [#886](https://github.com/kubernetes/ingress/pull/886) Add support for SubjectAltName in SSL certificates +- [x] [#888](https://github.com/kubernetes/ingress/pull/888) Update nginx-slim to 0.19 +- [x] [#889](https://github.com/kubernetes/ingress/pull/889) Add PHOST to backend +- [x] [#890](https://github.com/kubernetes/ingress/pull/890) Improve variable configuration for source IP address +- [x] [#892](https://github.com/kubernetes/ingress/pull/892) Add upstream keepalive connections cache +- [x] [#897](https://github.com/kubernetes/ingress/pull/897) Update outdated ingress resource link +- [x] [#898](https://github.com/kubernetes/ingress/pull/898) add error check right when reload nginx fail +- [x] [#899](https://github.com/kubernetes/ingress/pull/899) Fix nginx error check +- [x] [#900](https://github.com/kubernetes/ingress/pull/900) After #862 changes in the configmap do not trigger a reload +- [x] [#901](https://github.com/kubernetes/ingress/pull/901) [doc] Update NGinX status port to 18080 +- [x] [#902](https://github.com/kubernetes/ingress/pull/902) Always reload after a change in the configuration +- [x] [#904](https://github.com/kubernetes/ingress/pull/904) Fix nginx sticky sessions +- [x] [#906](https://github.com/kubernetes/ingress/pull/906) Fix race condition with closed channels +- [x] [#907](https://github.com/kubernetes/ingress/pull/907) nginx/proxy: allow specifying next upstream behaviour +- [x] [#910](https://github.com/kubernetes/ingress/pull/910) Feature request: use `X-Forwarded-Host` from the reverse proxy before +- [x] [#911](https://github.com/kubernetes/ingress/pull/911) Improve X-Forwarded-Host support +- [x] [#915](https://github.com/kubernetes/ingress/pull/915) Release nginx-slim 0.20 +- [x] [#916](https://github.com/kubernetes/ingress/pull/916) Add arm and ppc64le support +- [x] [#919](https://github.com/kubernetes/ingress/pull/919) Apply the 'ssl-redirect' annotation per-location +- [x] [#922](https://github.com/kubernetes/ingress/pull/922) Add example of TLS termination using a classic ELB ### 0.9-beta.8 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8` - -*Changes:* - -- [X] [#761](https://github.com/kubernetes/ingress/pull/761) NGINX TCP Ingresses do not bind on IPv6 -- [X] [#850](https://github.com/kubernetes/ingress/pull/850) Fix IPv6 UDP stream section -- [X] [#851](https://github.com/kubernetes/ingress/pull/851) ensure private key and certificate match -- [X] [#852](https://github.com/kubernetes/ingress/pull/852) Don't expose certificate metrics for default server -- [X] [#846](https://github.com/kubernetes/ingress/pull/846) Match ServicePort to Endpoints by Name -- [X] [#854](https://github.com/kubernetes/ingress/pull/854) Document log-format-stream and log-format-upstream -- [X] [#847](https://github.com/kubernetes/ingress/pull/847) fix semicolon -- [X] [#848](https://github.com/kubernetes/ingress/pull/848) Add metric "ssl certificate expiration" -- [X] [#839](https://github.com/kubernetes/ingress/pull/839) "No endpoints" issue -- [X] [#845](https://github.com/kubernetes/ingress/pull/845) Fix no endpoints issue when named ports are used -- [X] [#822](https://github.com/kubernetes/ingress/pull/822) Release ubuntu-slim 0.11 -- [X] 
[#824](https://github.com/kubernetes/ingress/pull/824) Update nginx-slim to 0.18 -- [X] [#823](https://github.com/kubernetes/ingress/pull/823) Release nginx-slim 0.18 -- [X] [#827](https://github.com/kubernetes/ingress/pull/827) Introduce working example of nginx controller with rbac -- [X] [#835](https://github.com/kubernetes/ingress/pull/835) Make log format json escaping configurable -- [X] [#843](https://github.com/kubernetes/ingress/pull/843) Avoid setting maximum number of open file descriptors lower than 1024 -- [X] [#837](https://github.com/kubernetes/ingress/pull/837) Cleanup interface -- [X] [#836](https://github.com/kubernetes/ingress/pull/836) Make log format json escaping configurable -- [X] [#828](https://github.com/kubernetes/ingress/pull/828) Wrap IPv6 endpoints in [] -- [X] [#821](https://github.com/kubernetes/ingress/pull/821) nginx-ingress: occasional 503 Service Temporarily Unavailable -- [X] [#829](https://github.com/kubernetes/ingress/pull/829) feat(template): wrap IPv6 addresses in [] -- [X] [#786](https://github.com/kubernetes/ingress/pull/786) Update echoserver image version in examples -- [X] [#825](https://github.com/kubernetes/ingress/pull/825) Create or delete ingress based on class annotation -- [X] [#790](https://github.com/kubernetes/ingress/pull/790) #789 removing duplicate X-Real-IP header -- [X] [#792](https://github.com/kubernetes/ingress/pull/792) Avoid checking if the controllers are synced -- [X] [#798](https://github.com/kubernetes/ingress/pull/798) nginx: RBAC for leader election -- [X] [#799](https://github.com/kubernetes/ingress/pull/799) could not build variables_hash -- [X] [#809](https://github.com/kubernetes/ingress/pull/809) Fix dynamic variable name -- [X] [#804](https://github.com/kubernetes/ingress/pull/804) Fix #798 - RBAC for leader election -- [X] [#806](https://github.com/kubernetes/ingress/pull/806) fix ingress rbac roles -- [X] [#811](https://github.com/kubernetes/ingress/pull/811) external auth - proxy_pass_request_body off + big bodies give 500/413 -- [X] [#785](https://github.com/kubernetes/ingress/pull/785) Publish echoheader image -- [X] [#813](https://github.com/kubernetes/ingress/pull/813) Added client_max_body_size to authPath location -- [X] [#814](https://github.com/kubernetes/ingress/pull/814) rbac-nginx: resourceNames cannot filter create verb -- [X] [#774](https://github.com/kubernetes/ingress/pull/774) Add IPv6 support in TCP and UDP stream section -- [X] [#784](https://github.com/kubernetes/ingress/pull/784) Allow customization of variables hash tables -- [X] [#782](https://github.com/kubernetes/ingress/pull/782) Set "proxy_pass_header Server;" -- [X] [#783](https://github.com/kubernetes/ingress/pull/783) nginx/README.md: clarify app-root and fix example hyperlink -- [X] [#787](https://github.com/kubernetes/ingress/pull/787) Add setting to allow returning the Server header from the backend +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8` + +_Changes:_ + +- [x] [#761](https://github.com/kubernetes/ingress/pull/761) NGINX TCP Ingresses do not bind on IPv6 +- [x] [#850](https://github.com/kubernetes/ingress/pull/850) Fix IPv6 UDP stream section +- [x] [#851](https://github.com/kubernetes/ingress/pull/851) ensure private key and certificate match +- 
[x] [#852](https://github.com/kubernetes/ingress/pull/852) Don't expose certificate metrics for default server +- [x] [#846](https://github.com/kubernetes/ingress/pull/846) Match ServicePort to Endpoints by Name +- [x] [#854](https://github.com/kubernetes/ingress/pull/854) Document log-format-stream and log-format-upstream +- [x] [#847](https://github.com/kubernetes/ingress/pull/847) fix semicolon +- [x] [#848](https://github.com/kubernetes/ingress/pull/848) Add metric "ssl certificate expiration" +- [x] [#839](https://github.com/kubernetes/ingress/pull/839) "No endpoints" issue +- [x] [#845](https://github.com/kubernetes/ingress/pull/845) Fix no endpoints issue when named ports are used +- [x] [#822](https://github.com/kubernetes/ingress/pull/822) Release ubuntu-slim 0.11 +- [x] [#824](https://github.com/kubernetes/ingress/pull/824) Update nginx-slim to 0.18 +- [x] [#823](https://github.com/kubernetes/ingress/pull/823) Release nginx-slim 0.18 +- [x] [#827](https://github.com/kubernetes/ingress/pull/827) Introduce working example of nginx controller with rbac +- [x] [#835](https://github.com/kubernetes/ingress/pull/835) Make log format json escaping configurable +- [x] [#843](https://github.com/kubernetes/ingress/pull/843) Avoid setting maximum number of open file descriptors lower than 1024 +- [x] [#837](https://github.com/kubernetes/ingress/pull/837) Cleanup interface +- [x] [#836](https://github.com/kubernetes/ingress/pull/836) Make log format json escaping configurable +- [x] [#828](https://github.com/kubernetes/ingress/pull/828) Wrap IPv6 endpoints in [] +- [x] [#821](https://github.com/kubernetes/ingress/pull/821) nginx-ingress: occasional 503 Service Temporarily Unavailable +- [x] [#829](https://github.com/kubernetes/ingress/pull/829) feat(template): wrap IPv6 addresses in [] +- [x] [#786](https://github.com/kubernetes/ingress/pull/786) Update echoserver image version in examples +- [x] [#825](https://github.com/kubernetes/ingress/pull/825) Create or delete ingress based on class annotation +- [x] [#790](https://github.com/kubernetes/ingress/pull/790) #789 removing duplicate X-Real-IP header +- [x] [#792](https://github.com/kubernetes/ingress/pull/792) Avoid checking if the controllers are synced +- [x] [#798](https://github.com/kubernetes/ingress/pull/798) nginx: RBAC for leader election +- [x] [#799](https://github.com/kubernetes/ingress/pull/799) could not build variables_hash +- [x] [#809](https://github.com/kubernetes/ingress/pull/809) Fix dynamic variable name +- [x] [#804](https://github.com/kubernetes/ingress/pull/804) Fix #798 - RBAC for leader election +- [x] [#806](https://github.com/kubernetes/ingress/pull/806) fix ingress rbac roles +- [x] [#811](https://github.com/kubernetes/ingress/pull/811) external auth - proxy_pass_request_body off + big bodies give 500/413 +- [x] [#785](https://github.com/kubernetes/ingress/pull/785) Publish echoheader image +- [x] [#813](https://github.com/kubernetes/ingress/pull/813) Added client_max_body_size to authPath location +- [x] [#814](https://github.com/kubernetes/ingress/pull/814) rbac-nginx: resourceNames cannot filter create verb +- [x] [#774](https://github.com/kubernetes/ingress/pull/774) Add IPv6 support in TCP and UDP stream section +- [x] 
[#784](https://github.com/kubernetes/ingress/pull/784) Allow customization of variables hash tables +- [x] [#782](https://github.com/kubernetes/ingress/pull/782) Set "proxy_pass_header Server;" +- [x] [#783](https://github.com/kubernetes/ingress/pull/783) nginx/README.md: clarify app-root and fix example hyperlink +- [x] [#787](https://github.com/kubernetes/ingress/pull/787) Add setting to allow returning the Server header from the backend ### 0.9-beta.7 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.7` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.7` -*Changes:* +_Changes:_ -- [X] [#777](https://github.com/kubernetes/ingress/pull/777) Update sniff parser to fix index out of bound error +- [x] [#777](https://github.com/kubernetes/ingress/pull/777) Update sniff parser to fix index out of bound error ### 0.9-beta.6 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.6` - -*Changes:* - -- [X] [#647](https://github.com/kubernetes/ingress/pull/647) ingress.class enhancement for debugging. -- [X] [#708](https://github.com/kubernetes/ingress/pull/708) ingress losing real source IP when tls enabled -- [X] [#760](https://github.com/kubernetes/ingress/pull/760) Change recorder event scheme -- [X] [#704](https://github.com/kubernetes/ingress/pull/704) fix nginx reload flags '-c' -- [X] [#757](https://github.com/kubernetes/ingress/pull/757) Replace use of endpoints as locks with configmap -- [X] [#752](https://github.com/kubernetes/ingress/pull/752) nginx ingress header config backwards -- [X] [#756](https://github.com/kubernetes/ingress/pull/756) Fix bad variable assignment in template nginx -- [X] [#729](https://github.com/kubernetes/ingress/pull/729) Release nginx-slim 0.17 -- [X] [#755](https://github.com/kubernetes/ingress/pull/755) Fix server name hash maxSize default value -- [X] [#741](https://github.com/kubernetes/ingress/pull/741) Update golang dependencies -- [X] [#749](https://github.com/kubernetes/ingress/pull/749) Remove service annotation for namedPorts -- [X] [#740](https://github.com/kubernetes/ingress/pull/740) Refactoring whitelist source IP verification -- [X] [#734](https://github.com/kubernetes/ingress/pull/734) Specify nginx image arch -- [X] [#728](https://github.com/kubernetes/ingress/pull/728) Update nginx image -- [X] [#723](https://github.com/kubernetes/ingress/pull/723) update readme about vts metrics -- [X] [#726](https://github.com/kubernetes/ingress/pull/726) Release ubuntu-slim 0.10 -- [X] [#727](https://github.com/kubernetes/ingress/pull/727) [nginx] whitelist-source-range doesn’t work on ssl port -- [X] [#709](https://github.com/kubernetes/ingress/pull/709) Add config for X-Forwarded-For trust -- [X] [#679](https://github.com/kubernetes/ingress/pull/679) add getenv -- [X] [#680](https://github.com/kubernetes/ingress/pull/680) nginx/pkg/config: delete unuseful variable -- [X] [#716](https://github.com/kubernetes/ingress/pull/716) Add secure-verify-ca-secret annotation -- [X] [#722](https://github.com/kubernetes/ingress/pull/722) Remove go-reap and use tini as process reaper -- [X] [#725](https://github.com/kubernetes/ingress/pull/725) Add keepalive_requests and client_body_buffer_size options -- [X] [#724](https://github.com/kubernetes/ingress/pull/724) 
change the directory of default-backend.yaml -- [X] [#656](https://github.com/kubernetes/ingress/pull/656) Nginx Ingress Controller - Specify load balancing method -- [X] [#717](https://github.com/kubernetes/ingress/pull/717) delete unuseful variable -- [X] [#712](https://github.com/kubernetes/ingress/pull/712) Set $proxy_upstream_name before location directive -- [X] [#715](https://github.com/kubernetes/ingress/pull/715) Corrected annotation ex `signin-url` to `auth-url` -- [X] [#718](https://github.com/kubernetes/ingress/pull/718) nodeController sync -- [X] [#694](https://github.com/kubernetes/ingress/pull/694) SSL-Passthrough broken in beta.5 -- [X] [#678](https://github.com/kubernetes/ingress/pull/678) Convert CN SSL Certificate to lowercase before comparison -- [X] [#690](https://github.com/kubernetes/ingress/pull/690) Fix IP in logs for https traffic -- [X] [#673](https://github.com/kubernetes/ingress/pull/673) Override load balancer alg view config map -- [X] [#675](https://github.com/kubernetes/ingress/pull/675) Use proxy-protocol to pass through source IP to nginx -- [X] [#707](https://github.com/kubernetes/ingress/pull/707) use nginx vts module version 0.1.14 -- [X] [#702](https://github.com/kubernetes/ingress/pull/702) Document passing of ssl_client_cert to backend -- [X] [#688](https://github.com/kubernetes/ingress/pull/688) Add example of UDP loadbalancing -- [X] [#696](https://github.com/kubernetes/ingress/pull/696) [nginx] pass non-SNI TLS hello to default backend, Fixes #693 -- [X] [#685](https://github.com/kubernetes/ingress/pull/685) Fix error in generated nginx.conf for optional hsts-preload - +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.6` + +_Changes:_ + +- [x] [#647](https://github.com/kubernetes/ingress/pull/647) ingress.class enhancement for debugging. 
+- [x] [#708](https://github.com/kubernetes/ingress/pull/708) ingress losing real source IP when tls enabled +- [x] [#760](https://github.com/kubernetes/ingress/pull/760) Change recorder event scheme +- [x] [#704](https://github.com/kubernetes/ingress/pull/704) fix nginx reload flags '-c' +- [x] [#757](https://github.com/kubernetes/ingress/pull/757) Replace use of endpoints as locks with configmap +- [x] [#752](https://github.com/kubernetes/ingress/pull/752) nginx ingress header config backwards +- [x] [#756](https://github.com/kubernetes/ingress/pull/756) Fix bad variable assignment in template nginx +- [x] [#729](https://github.com/kubernetes/ingress/pull/729) Release nginx-slim 0.17 +- [x] [#755](https://github.com/kubernetes/ingress/pull/755) Fix server name hash maxSize default value +- [x] [#741](https://github.com/kubernetes/ingress/pull/741) Update golang dependencies +- [x] [#749](https://github.com/kubernetes/ingress/pull/749) Remove service annotation for namedPorts +- [x] [#740](https://github.com/kubernetes/ingress/pull/740) Refactoring whitelist source IP verification +- [x] [#734](https://github.com/kubernetes/ingress/pull/734) Specify nginx image arch +- [x] [#728](https://github.com/kubernetes/ingress/pull/728) Update nginx image +- [x] [#723](https://github.com/kubernetes/ingress/pull/723) update readme about vts metrics +- [x] [#726](https://github.com/kubernetes/ingress/pull/726) Release ubuntu-slim 0.10 +- [x] [#727](https://github.com/kubernetes/ingress/pull/727) [nginx] whitelist-source-range doesn’t work on ssl port +- [x] [#709](https://github.com/kubernetes/ingress/pull/709) Add config for X-Forwarded-For trust +- [x] [#679](https://github.com/kubernetes/ingress/pull/679) add getenv +- [x] [#680](https://github.com/kubernetes/ingress/pull/680) nginx/pkg/config: delete unuseful variable +- [x] [#716](https://github.com/kubernetes/ingress/pull/716) Add secure-verify-ca-secret annotation +- [x] [#722](https://github.com/kubernetes/ingress/pull/722) Remove go-reap and use tini as process reaper +- [x] [#725](https://github.com/kubernetes/ingress/pull/725) Add keepalive_requests and client_body_buffer_size options +- [x] [#724](https://github.com/kubernetes/ingress/pull/724) change the directory of default-backend.yaml +- [x] [#656](https://github.com/kubernetes/ingress/pull/656) Nginx Ingress Controller - Specify load balancing method +- [x] [#717](https://github.com/kubernetes/ingress/pull/717) delete unuseful variable +- [x] [#712](https://github.com/kubernetes/ingress/pull/712) Set $proxy_upstream_name before location directive +- [x] [#715](https://github.com/kubernetes/ingress/pull/715) Corrected annotation ex `signin-url` to `auth-url` +- [x] [#718](https://github.com/kubernetes/ingress/pull/718) nodeController sync +- [x] [#694](https://github.com/kubernetes/ingress/pull/694) SSL-Passthrough broken in beta.5 +- [x] [#678](https://github.com/kubernetes/ingress/pull/678) Convert CN SSL Certificate to lowercase before comparison +- [x] [#690](https://github.com/kubernetes/ingress/pull/690) Fix IP in logs for https traffic +- [x] [#673](https://github.com/kubernetes/ingress/pull/673) Override load balancer alg view config map +- [x] [#675](https://github.com/kubernetes/ingress/pull/675) Use proxy-protocol 
to pass through source IP to nginx +- [x] [#707](https://github.com/kubernetes/ingress/pull/707) use nginx vts module version 0.1.14 +- [x] [#702](https://github.com/kubernetes/ingress/pull/702) Document passing of ssl_client_cert to backend +- [x] [#688](https://github.com/kubernetes/ingress/pull/688) Add example of UDP loadbalancing +- [x] [#696](https://github.com/kubernetes/ingress/pull/696) [nginx] pass non-SNI TLS hello to default backend, Fixes #693 +- [x] [#685](https://github.com/kubernetes/ingress/pull/685) Fix error in generated nginx.conf for optional hsts-preload ### 0.9-beta.5 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.5` - -*Changes:* +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.5` -- [X] [#663](https://github.com/kubernetes/ingress/pull/663) Remove helper required in go < 1.8 -- [X] [#662](https://github.com/kubernetes/ingress/pull/662) Add debug information about ingress class -- [X] [#661](https://github.com/kubernetes/ingress/pull/661) Avoid running nginx if the configuration file is empty -- [X] [#660](https://github.com/kubernetes/ingress/pull/660) Rollback queue refactoring -- [X] [#654](https://github.com/kubernetes/ingress/pull/654) Update go version to 1.8 +_Changes:_ +- [x] [#663](https://github.com/kubernetes/ingress/pull/663) Remove helper required in go < 1.8 +- [x] [#662](https://github.com/kubernetes/ingress/pull/662) Add debug information about ingress class +- [x] [#661](https://github.com/kubernetes/ingress/pull/661) Avoid running nginx if the configuration file is empty +- [x] [#660](https://github.com/kubernetes/ingress/pull/660) Rollback queue refactoring +- [x] [#654](https://github.com/kubernetes/ingress/pull/654) Update go version to 1.8 ### 0.9-beta.4 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.4` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.4` -*New Features:* +_New Features:_ - Add support for services of type ExternalName - -*Changes:* - -- [X] [#635](https://github.com/kubernetes/ingress/pull/635) Allow configuration of features underscores_in_headers and ignore_invalid_headers -- [X] [#633](https://github.com/kubernetes/ingress/pull/633) Fix lint errors -- [X] [#630](https://github.com/kubernetes/ingress/pull/630) Add example of TCP loadbalancing -- [X] [#629](https://github.com/kubernetes/ingress/pull/629) Add support for services of type ExternalName -- [X] [#624](https://github.com/kubernetes/ingress/pull/624) Compute server_names_hash_bucket_size correctly -- [X] [#615](https://github.com/kubernetes/ingress/pull/615) Process exited cleanly before we hit wait4 -- [X] [#614](https://github.com/kubernetes/ingress/pull/614) Refactor nginx ssl passthrough -- [X] [#613](https://github.com/kubernetes/ingress/pull/613) Status leader election must consired the ingress class -- [X] [#607](https://github.com/kubernetes/ingress/pull/607) Allow custom server_names_hash_max_size & server_names_hash_bucket_size -- [X] [#601](https://github.com/kubernetes/ingress/pull/601) add a judgment -- [X] [#601](https://github.com/kubernetes/ingress/pull/600) Replace custom child reap code with go-reap -- [X] [#597](https://github.com/kubernetes/ingress/pull/599) Add flag to force namespace isolation -- [X] 
[#595](https://github.com/kubernetes/ingress/pull/595) Remove Host header from auth_request proxy configuration -- [X] [#588](https://github.com/kubernetes/ingress/pull/588) Read resolv.conf file just once -- [X] [#586](https://github.com/kubernetes/ingress/pull/586) Updated instructions to create an ingress controller build -- [X] [#583](https://github.com/kubernetes/ingress/pull/583) fixed lua_package_path in nginx.tmpl -- [X] [#580](https://github.com/kubernetes/ingress/pull/580) Updated faq for running multiple ingress controller -- [X] [#579](https://github.com/kubernetes/ingress/pull/579) Detect if the ingress controller is running with multiple replicas -- [X] [#578](https://github.com/kubernetes/ingress/pull/578) Set different listeners per protocol version -- [X] [#577](https://github.com/kubernetes/ingress/pull/577) Avoid zombie child processes -- [X] [#576](https://github.com/kubernetes/ingress/pull/576) Replace secret workqueue -- [X] [#568](https://github.com/kubernetes/ingress/pull/568) Revert merge annotations to the implicit root context -- [X] [#563](https://github.com/kubernetes/ingress/pull/563) Add option to disable hsts preload -- [X] [#560](https://github.com/kubernetes/ingress/pull/560) Fix intermittent misconfiguration of backend.secure and SessionAffinity -- [X] [#556](https://github.com/kubernetes/ingress/pull/556) Update nginx version and remove dumb-init -- [X] [#551](https://github.com/kubernetes/ingress/pull/551) Build namespace and ingress class as label -- [X] [#546](https://github.com/kubernetes/ingress/pull/546) Fix a couple of 'does not contains' typos -- [X] [#542](https://github.com/kubernetes/ingress/pull/542) Fix lint errors -- [X] [#540](https://github.com/kubernetes/ingress/pull/540) Add Backends.SSLPassthrough attribute -- [X] [#539](https://github.com/kubernetes/ingress/pull/539) Migrate to client-go -- [X] [#536](https://github.com/kubernetes/ingress/pull/536) add unit test cases for core/pkg/ingress/controller/backend_ssl -- [X] [#535](https://github.com/kubernetes/ingress/pull/535) Add test for ingress status update -- [X] [#532](https://github.com/kubernetes/ingress/pull/532) Add setting to configure ecdh curve -- [X] [#531](https://github.com/kubernetes/ingress/pull/531) Fix link to examples -- [X] [#530](https://github.com/kubernetes/ingress/pull/530) Fix link to custom nginx configuration -- [X] [#528](https://github.com/kubernetes/ingress/pull/528) Add reference to apiserver-host flag -- [X] [#527](https://github.com/kubernetes/ingress/pull/527) Add annotations to location of default backend (root context) -- [X] [#525](https://github.com/kubernetes/ingress/pull/525) Avoid negative values configuring the max number of open files -- [X] [#523](https://github.com/kubernetes/ingress/pull/523) Fix a typo in an error message -- [X] [#521](https://github.com/kubernetes/ingress/pull/521) nginx-ingress-controller is built twice by docker-build target -- [X] [#517](https://github.com/kubernetes/ingress/pull/517) Use whitelist-source-range from configmap when no annotation on ingress -- [X] [#516](https://github.com/kubernetes/ingress/pull/516) Convert WorkerProcesses setting to string to allow the value auto -- [X] [#512](https://github.com/kubernetes/ingress/pull/512) Fix typos regarding the 
ssl-passthrough annotation documentation -- [X] [#505](https://github.com/kubernetes/ingress/pull/505) add unit test cases for core/pkg/ingress/controller/annotations -- [X] [#503](https://github.com/kubernetes/ingress/pull/503) Add example for nginx in aws -- [X] [#502](https://github.com/kubernetes/ingress/pull/502) Add information about SSL Passthrough annotation -- [X] [#500](https://github.com/kubernetes/ingress/pull/500) Improve TLS secret configuration -- [X] [#498](https://github.com/kubernetes/ingress/pull/498) Proper enqueue a secret on the secret queue -- [X] [#493](https://github.com/kubernetes/ingress/pull/493) Update nginx and vts module -- [X] [#490](https://github.com/kubernetes/ingress/pull/490) Add unit test case for named_port -- [X] [#488](https://github.com/kubernetes/ingress/pull/488) Adds support for CORS on error responses and Authorization header -- [X] [#485](https://github.com/kubernetes/ingress/pull/485) Fix typo nginx configMap vts metrics customization -- [X] [#481](https://github.com/kubernetes/ingress/pull/481) Remove unnecessary quote in nginx log format -- [X] [#471](https://github.com/kubernetes/ingress/pull/471) prometheus scrape annotations -- [X] [#460](https://github.com/kubernetes/ingress/pull/460) add example of 'run multiple haproxy ingress controllers as a deployment' -- [X] [#459](https://github.com/kubernetes/ingress/pull/459) Add information about SSL certificates in the default log level -- [X] [#456](https://github.com/kubernetes/ingress/pull/456) Avoid upstreams with multiple servers with the same port -- [X] [#454](https://github.com/kubernetes/ingress/pull/454) Pass request port to real server -- [X] [#450](https://github.com/kubernetes/ingress/pull/450) fix nginx-tcp-and-udp on same port -- [X] [#446](https://github.com/kubernetes/ingress/pull/446) remove configmap validations -- [X] [#445](https://github.com/kubernetes/ingress/pull/445) Remove snakeoil certificate generation -- [X] [#442](https://github.com/kubernetes/ingress/pull/442) Fix a few bugs in the nginx-ingress-controller Makefile -- [X] [#441](https://github.com/kubernetes/ingress/pull/441) skip validation when configmap is empty -- [X] [#439](https://github.com/kubernetes/ingress/pull/439) Avoid a nil-reference when the temporary file cannot be created -- [X] [#438](https://github.com/kubernetes/ingress/pull/438) Improve English in error messages -- [X] [#437](https://github.com/kubernetes/ingress/pull/437) Reference constant - +_Changes:_ + +- [x] [#635](https://github.com/kubernetes/ingress/pull/635) Allow configuration of features underscores_in_headers and ignore_invalid_headers +- [x] [#633](https://github.com/kubernetes/ingress/pull/633) Fix lint errors +- [x] [#630](https://github.com/kubernetes/ingress/pull/630) Add example of TCP loadbalancing +- [x] [#629](https://github.com/kubernetes/ingress/pull/629) Add support for services of type ExternalName +- [x] [#624](https://github.com/kubernetes/ingress/pull/624) Compute server_names_hash_bucket_size correctly +- [x] [#615](https://github.com/kubernetes/ingress/pull/615) Process exited cleanly before we hit wait4 +- [x] [#614](https://github.com/kubernetes/ingress/pull/614) Refactor nginx ssl passthrough +- [x] [#613](https://github.com/kubernetes/ingress/pull/613) Status 
leader election must consider the ingress class +- [x] [#607](https://github.com/kubernetes/ingress/pull/607) Allow custom server_names_hash_max_size & server_names_hash_bucket_size +- [x] [#601](https://github.com/kubernetes/ingress/pull/601) add a judgment +- [x] [#600](https://github.com/kubernetes/ingress/pull/600) Replace custom child reap code with go-reap +- [x] [#599](https://github.com/kubernetes/ingress/pull/599) Add flag to force namespace isolation +- [x] [#595](https://github.com/kubernetes/ingress/pull/595) Remove Host header from auth_request proxy configuration +- [x] [#588](https://github.com/kubernetes/ingress/pull/588) Read resolv.conf file just once +- [x] [#586](https://github.com/kubernetes/ingress/pull/586) Updated instructions to create an ingress controller build +- [x] [#583](https://github.com/kubernetes/ingress/pull/583) fixed lua_package_path in nginx.tmpl +- [x] [#580](https://github.com/kubernetes/ingress/pull/580) Updated faq for running multiple ingress controller +- [x] [#579](https://github.com/kubernetes/ingress/pull/579) Detect if the ingress controller is running with multiple replicas +- [x] [#578](https://github.com/kubernetes/ingress/pull/578) Set different listeners per protocol version +- [x] [#577](https://github.com/kubernetes/ingress/pull/577) Avoid zombie child processes +- [x] [#576](https://github.com/kubernetes/ingress/pull/576) Replace secret workqueue +- [x] [#568](https://github.com/kubernetes/ingress/pull/568) Revert merge annotations to the implicit root context +- [x] [#563](https://github.com/kubernetes/ingress/pull/563) Add option to disable hsts preload +- [x] [#560](https://github.com/kubernetes/ingress/pull/560) Fix intermittent misconfiguration of backend.secure and SessionAffinity +- [x] [#556](https://github.com/kubernetes/ingress/pull/556) Update nginx version and remove dumb-init +- [x] [#551](https://github.com/kubernetes/ingress/pull/551) Build namespace and ingress class as label +- [x] [#546](https://github.com/kubernetes/ingress/pull/546) Fix a couple of 'does not contains' typos +- [x] [#542](https://github.com/kubernetes/ingress/pull/542) Fix lint errors +- [x] [#540](https://github.com/kubernetes/ingress/pull/540) Add Backends.SSLPassthrough attribute +- [x] [#539](https://github.com/kubernetes/ingress/pull/539) Migrate to client-go +- [x] [#536](https://github.com/kubernetes/ingress/pull/536) add unit test cases for core/pkg/ingress/controller/backend_ssl +- [x] [#535](https://github.com/kubernetes/ingress/pull/535) Add test for ingress status update +- [x] [#532](https://github.com/kubernetes/ingress/pull/532) Add setting to configure ecdh curve +- [x] [#531](https://github.com/kubernetes/ingress/pull/531) Fix link to examples +- [x] [#530](https://github.com/kubernetes/ingress/pull/530) Fix link to custom nginx configuration +- [x] [#528](https://github.com/kubernetes/ingress/pull/528) Add reference to apiserver-host flag +- [x] [#527](https://github.com/kubernetes/ingress/pull/527) Add annotations to location of default backend (root context) +- [x] [#525](https://github.com/kubernetes/ingress/pull/525) Avoid negative values configuring the max number of open files +- [x] [#523](https://github.com/kubernetes/ingress/pull/523) Fix a typo in an error message +- [x]
[#521](https://github.com/kubernetes/ingress/pull/521) nginx-ingress-controller is built twice by docker-build target +- [x] [#517](https://github.com/kubernetes/ingress/pull/517) Use whitelist-source-range from configmap when no annotation on ingress +- [x] [#516](https://github.com/kubernetes/ingress/pull/516) Convert WorkerProcesses setting to string to allow the value auto +- [x] [#512](https://github.com/kubernetes/ingress/pull/512) Fix typos regarding the ssl-passthrough annotation documentation +- [x] [#505](https://github.com/kubernetes/ingress/pull/505) add unit test cases for core/pkg/ingress/controller/annotations +- [x] [#503](https://github.com/kubernetes/ingress/pull/503) Add example for nginx in aws +- [x] [#502](https://github.com/kubernetes/ingress/pull/502) Add information about SSL Passthrough annotation +- [x] [#500](https://github.com/kubernetes/ingress/pull/500) Improve TLS secret configuration +- [x] [#498](https://github.com/kubernetes/ingress/pull/498) Proper enqueue a secret on the secret queue +- [x] [#493](https://github.com/kubernetes/ingress/pull/493) Update nginx and vts module +- [x] [#490](https://github.com/kubernetes/ingress/pull/490) Add unit test case for named_port +- [x] [#488](https://github.com/kubernetes/ingress/pull/488) Adds support for CORS on error responses and Authorization header +- [x] [#485](https://github.com/kubernetes/ingress/pull/485) Fix typo nginx configMap vts metrics customization +- [x] [#481](https://github.com/kubernetes/ingress/pull/481) Remove unnecessary quote in nginx log format +- [x] [#471](https://github.com/kubernetes/ingress/pull/471) prometheus scrape annotations +- [x] [#460](https://github.com/kubernetes/ingress/pull/460) add example of 'run multiple haproxy ingress controllers as a deployment' +- [x] [#459](https://github.com/kubernetes/ingress/pull/459) Add information about SSL certificates in the default log level +- [x] [#456](https://github.com/kubernetes/ingress/pull/456) Avoid upstreams with multiple servers with the same port +- [x] [#454](https://github.com/kubernetes/ingress/pull/454) Pass request port to real server +- [x] [#450](https://github.com/kubernetes/ingress/pull/450) fix nginx-tcp-and-udp on same port +- [x] [#446](https://github.com/kubernetes/ingress/pull/446) remove configmap validations +- [x] [#445](https://github.com/kubernetes/ingress/pull/445) Remove snakeoil certificate generation +- [x] [#442](https://github.com/kubernetes/ingress/pull/442) Fix a few bugs in the nginx-ingress-controller Makefile +- [x] [#441](https://github.com/kubernetes/ingress/pull/441) skip validation when configmap is empty +- [x] [#439](https://github.com/kubernetes/ingress/pull/439) Avoid a nil-reference when the temporary file cannot be created +- [x] [#438](https://github.com/kubernetes/ingress/pull/438) Improve English in error messages +- [x] [#437](https://github.com/kubernetes/ingress/pull/437) Reference constant ### 0.9-beta.3 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3` -*New Features:* +_New Features:_ - Custom log formats using `log-format-upstream` directive in the configuration configmap. 
- Force redirect to SSL using the annotation `ingress.kubernetes.io/force-ssl-redirect` - Prometheus metric for VTS status module (transparent, just enable vts stats) - Improved external authentication adding `ingress.kubernetes.io/auth-signin` annotation. Please check this [example](https://github.com/kubernetes/ingress/tree/master/examples/external-auth/nginx) - -*Breaking changes:* +_Breaking changes:_ - `ssl-dh-param` configuration in configmap is now the name of a secret that contains the Diffie-Hellman key -*Changes:* - -- [X] [#433](https://github.com/kubernetes/ingress/pull/433) close over the ingress variable or the last assignment will be used -- [X] [#424](https://github.com/kubernetes/ingress/pull/424) Manually sync secrets from certificate authentication annotations -- [X] [#423](https://github.com/kubernetes/ingress/pull/423) Scrap json metrics from nginx vts module when enabled -- [X] [#418](https://github.com/kubernetes/ingress/pull/418) Only update Ingress status for the configured class -- [X] [#415](https://github.com/kubernetes/ingress/pull/415) Improve external authentication docs -- [X] [#410](https://github.com/kubernetes/ingress/pull/410) Add support for "signin url" -- [X] [#409](https://github.com/kubernetes/ingress/pull/409) Allow custom http2 header sizes -- [X] [#408](https://github.com/kubernetes/ingress/pull/408) Review docs -- [X] [#406](https://github.com/kubernetes/ingress/pull/406) Add debug info and fix spelling -- [X] [#402](https://github.com/kubernetes/ingress/pull/402) allow specifying custom dh param -- [X] [#397](https://github.com/kubernetes/ingress/pull/397) Fix external auth -- [X] [#394](https://github.com/kubernetes/ingress/pull/394) Update README.md -- [X] [#392](https://github.com/kubernetes/ingress/pull/392) Fix http2 header size -- [X] [#391](https://github.com/kubernetes/ingress/pull/391) remove tmp nginx-diff files -- [X] [#390](https://github.com/kubernetes/ingress/pull/390) Fix RateLimit comment -- [X] [#385](https://github.com/kubernetes/ingress/pull/385) add Copyright -- [X] [#382](https://github.com/kubernetes/ingress/pull/382) Ingress Fake Certificate generation -- [X] [#380](https://github.com/kubernetes/ingress/pull/380) Fix custom log format -- [X] [#373](https://github.com/kubernetes/ingress/pull/373) Cleanup -- [X] [#371](https://github.com/kubernetes/ingress/pull/371) add configuration to disable listening on ipv6 -- [X] [#370](https://github.com/kubernetes/ingress/pull/270) Add documentation for ingress.kubernetes.io/force-ssl-redirect -- [X] [#369](https://github.com/kubernetes/ingress/pull/369) Minor text fix for "ApiServer" -- [X] [#367](https://github.com/kubernetes/ingress/pull/367) BuildLogFormatUpstream was always using the default log-format -- [X] [#366](https://github.com/kubernetes/ingress/pull/366) add_judgment -- [X] [#365](https://github.com/kubernetes/ingress/pull/365) add ForceSSLRedirect ingress annotation -- [X] [#364](https://github.com/kubernetes/ingress/pull/364) Fix error caused by increasing proxy_buffer_size (#363) -- [X] [#362](https://github.com/kubernetes/ingress/pull/362) Fix ingress class -- [X] [#360](https://github.com/kubernetes/ingress/pull/360) add example of 'run multiple nginx ingress controllers as a deployment' -- [X] 
[#358](https://github.com/kubernetes/ingress/pull/358) Checks if the TLS secret contains a valid keypair structure -- [X] [#356](https://github.com/kubernetes/ingress/pull/356) Disable listen only on ipv6 and fix proxy_protocol -- [X] [#354](https://github.com/kubernetes/ingress/pull/354) add judgment -- [X] [#352](https://github.com/kubernetes/ingress/pull/352) Add ability to customize upstream and stream log format -- [X] [#351](https://github.com/kubernetes/ingress/pull/351) Enable custom election id for status sync. -- [X] [#347](https://github.com/kubernetes/ingress/pull/347) Fix client source IP address -- [X] [#345](https://github.com/kubernetes/ingress/pull/345) Fix lint error -- [X] [#344](https://github.com/kubernetes/ingress/pull/344) Refactoring of TCP and UDP services -- [X] [#343](https://github.com/kubernetes/ingress/pull/343) Fix node lister when --watch-namespace is used -- [X] [#341](https://github.com/kubernetes/ingress/pull/341) Do not run coverage check in the default target. -- [X] [#340](https://github.com/kubernetes/ingress/pull/340) Add support for specify proxy cookie path/domain -- [X] [#337](https://github.com/kubernetes/ingress/pull/337) Fix for formatting error introduced in #304 -- [X] [#335](https://github.com/kubernetes/ingress/pull/335) Fix for vet complaints: -- [X] [#332](https://github.com/kubernetes/ingress/pull/332) Add annotation to customize nginx configuration -- [X] [#331](https://github.com/kubernetes/ingress/pull/331) Correct spelling mistake -- [X] [#328](https://github.com/kubernetes/ingress/pull/328) fix misspell "affinity" in main.go -- [X] [#326](https://github.com/kubernetes/ingress/pull/326) add nginx daemonset example -- [X] [#311](https://github.com/kubernetes/ingress/pull/311) Sort stream service ports to avoid extra reloads -- [X] [#307](https://github.com/kubernetes/ingress/pull/307) Add docs for body-size annotation -- [X] [#306](https://github.com/kubernetes/ingress/pull/306) modify nginx readme -- [X] [#304](https://github.com/kubernetes/ingress/pull/304) change 'buildSSPassthrouthUpstreams' to 'buildSSLPassthroughUpstreams' - +_Changes:_ + +- [x] [#433](https://github.com/kubernetes/ingress/pull/433) close over the ingress variable or the last assignment will be used +- [x] [#424](https://github.com/kubernetes/ingress/pull/424) Manually sync secrets from certificate authentication annotations +- [x] [#423](https://github.com/kubernetes/ingress/pull/423) Scrap json metrics from nginx vts module when enabled +- [x] [#418](https://github.com/kubernetes/ingress/pull/418) Only update Ingress status for the configured class +- [x] [#415](https://github.com/kubernetes/ingress/pull/415) Improve external authentication docs +- [x] [#410](https://github.com/kubernetes/ingress/pull/410) Add support for "signin url" +- [x] [#409](https://github.com/kubernetes/ingress/pull/409) Allow custom http2 header sizes +- [x] [#408](https://github.com/kubernetes/ingress/pull/408) Review docs +- [x] [#406](https://github.com/kubernetes/ingress/pull/406) Add debug info and fix spelling +- [x] [#402](https://github.com/kubernetes/ingress/pull/402) allow specifying custom dh param +- [x] [#397](https://github.com/kubernetes/ingress/pull/397) Fix external auth +- [x] 
[#394](https://github.com/kubernetes/ingress/pull/394) Update README.md +- [x] [#392](https://github.com/kubernetes/ingress/pull/392) Fix http2 header size +- [x] [#391](https://github.com/kubernetes/ingress/pull/391) remove tmp nginx-diff files +- [x] [#390](https://github.com/kubernetes/ingress/pull/390) Fix RateLimit comment +- [x] [#385](https://github.com/kubernetes/ingress/pull/385) add Copyright +- [x] [#382](https://github.com/kubernetes/ingress/pull/382) Ingress Fake Certificate generation +- [x] [#380](https://github.com/kubernetes/ingress/pull/380) Fix custom log format +- [x] [#373](https://github.com/kubernetes/ingress/pull/373) Cleanup +- [x] [#371](https://github.com/kubernetes/ingress/pull/371) add configuration to disable listening on ipv6 +- [x] [#370](https://github.com/kubernetes/ingress/pull/370) Add documentation for ingress.kubernetes.io/force-ssl-redirect +- [x] [#369](https://github.com/kubernetes/ingress/pull/369) Minor text fix for "ApiServer" +- [x] [#367](https://github.com/kubernetes/ingress/pull/367) BuildLogFormatUpstream was always using the default log-format +- [x] [#366](https://github.com/kubernetes/ingress/pull/366) add_judgment +- [x] [#365](https://github.com/kubernetes/ingress/pull/365) add ForceSSLRedirect ingress annotation +- [x] [#364](https://github.com/kubernetes/ingress/pull/364) Fix error caused by increasing proxy_buffer_size (#363) +- [x] [#362](https://github.com/kubernetes/ingress/pull/362) Fix ingress class +- [x] [#360](https://github.com/kubernetes/ingress/pull/360) add example of 'run multiple nginx ingress controllers as a deployment' +- [x] [#358](https://github.com/kubernetes/ingress/pull/358) Checks if the TLS secret contains a valid keypair structure +- [x] [#356](https://github.com/kubernetes/ingress/pull/356) Disable listen only on ipv6 and fix proxy_protocol +- [x] [#354](https://github.com/kubernetes/ingress/pull/354) add judgment +- [x] [#352](https://github.com/kubernetes/ingress/pull/352) Add ability to customize upstream and stream log format +- [x] [#351](https://github.com/kubernetes/ingress/pull/351) Enable custom election id for status sync. +- [x] [#347](https://github.com/kubernetes/ingress/pull/347) Fix client source IP address +- [x] [#345](https://github.com/kubernetes/ingress/pull/345) Fix lint error +- [x] [#344](https://github.com/kubernetes/ingress/pull/344) Refactoring of TCP and UDP services +- [x] [#343](https://github.com/kubernetes/ingress/pull/343) Fix node lister when --watch-namespace is used +- [x] [#341](https://github.com/kubernetes/ingress/pull/341) Do not run coverage check in the default target.
+- [x] [#340](https://github.com/kubernetes/ingress/pull/340) Add support for specify proxy cookie path/domain +- [x] [#337](https://github.com/kubernetes/ingress/pull/337) Fix for formatting error introduced in #304 +- [x] [#335](https://github.com/kubernetes/ingress/pull/335) Fix for vet complaints: +- [x] [#332](https://github.com/kubernetes/ingress/pull/332) Add annotation to customize nginx configuration +- [x] [#331](https://github.com/kubernetes/ingress/pull/331) Correct spelling mistake +- [x] [#328](https://github.com/kubernetes/ingress/pull/328) fix misspell "affinity" in main.go +- [x] [#326](https://github.com/kubernetes/ingress/pull/326) add nginx daemonset example +- [x] [#311](https://github.com/kubernetes/ingress/pull/311) Sort stream service ports to avoid extra reloads +- [x] [#307](https://github.com/kubernetes/ingress/pull/307) Add docs for body-size annotation +- [x] [#306](https://github.com/kubernetes/ingress/pull/306) modify nginx readme +- [x] [#304](https://github.com/kubernetes/ingress/pull/304) change 'buildSSPassthrouthUpstreams' to 'buildSSLPassthroughUpstreams' ### 0.9-beta.2 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2` -*New Features:* +_New Features:_ - New configuration flag `proxy-set-headers` to allow setting custom headers before sending traffic to backends. [Example here](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-headers/nginx) - Disable directive access_log globally using `disable-access-log: "true"` in the configuration ConfigMap. - Sticky session per Ingress rule using the annotation `ingress.kubernetes.io/affinity`. [Example here](https://github.com/kubernetes/ingress/tree/master/examples/affinity/cookie/nginx)
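For context, the three 0.9-beta.2 features above reduce to two keys in the controller's configuration ConfigMap plus one Ingress annotation. A minimal sketch of how they fit together, assuming illustrative resource names, an `ingress-nginx` namespace, and a pre-existing `echo` Service (none of which come from this changelog):

```yaml
# Hypothetical ConfigMap holding the extra headers to send to backends.
apiVersion: v1
kind: ConfigMap
metadata:
  name: custom-headers
  namespace: ingress-nginx
data:
  X-Request-Start: "t=${msec}"
---
# The controller's configuration ConfigMap wires up both global features.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
  namespace: ingress-nginx
data:
  proxy-set-headers: "ingress-nginx/custom-headers"  # namespace/name of the headers ConfigMap
  disable-access-log: "true"                         # disable access_log globally
---
# Sticky sessions are opted into per Ingress rule via the affinity annotation.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echo-sticky
  annotations:
    ingress.kubernetes.io/affinity: "cookie"
spec:
  rules:
    - host: echo.example.com
      http:
        paths:
          - path: /
            backend:
              serviceName: echo  # assumed to exist already
              servicePort: 80
```

The examples linked from the feature bullets remain the authoritative versions of this wiring.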
-*Changes:* - -- [X] [#300](https://github.com/kubernetes/ingress/pull/300) Change nginx variable to use in filter of access_log -- [X] [#296](https://github.com/kubernetes/ingress/pull/296) Fix rewrite regex to match the start of the URL and not a substring -- [X] [#293](https://github.com/kubernetes/ingress/pull/293) Update makefile gcloud docker command -- [X] [#290](https://github.com/kubernetes/ingress/pull/290) Update nginx version in ingress controller to 1.11.10 -- [X] [#286](https://github.com/kubernetes/ingress/pull/286) Add logs to help debugging and simplify default upstream configuration -- [X] [#285](https://github.com/kubernetes/ingress/pull/285) Added a Node StoreLister type -- [X] [#281](https://github.com/kubernetes/ingress/pull/281) Add chmod up directory tree for world read/execute on directories -- [X] [#279](https://github.com/kubernetes/ingress/pull/279) fix wrong link in the file of examples/README.md -- [X] [#275](https://github.com/kubernetes/ingress/pull/275) Pass headers to custom error backend -- [X] [#272](https://github.com/kubernetes/ingress/pull/272) Fix error getting class information from Ingress annotations -- [X] [#268](https://github.com/kubernetes/ingress/pull/268) minor: Fix typo in nginx README -- [X] [#265](https://github.com/kubernetes/ingress/pull/265) Fix rewrite annotation parser -- [X] [#262](https://github.com/kubernetes/ingress/pull/262) Add nginx README and configuration docs back -- [X] [#261](https://github.com/kubernetes/ingress/pull/261) types.go: fix typo in godoc -- [X] [#258](https://github.com/kubernetes/ingress/pull/258) Nginx sticky annotations -- [X] [#255](https://github.com/kubernetes/ingress/pull/255) Adds support for disabling access_log globally -- [X] [#247](https://github.com/kubernetes/ingress/pull/247) Fix wrong URL in nginx ingress configuration -- [X] [#246](https://github.com/kubernetes/ingress/pull/246) Add support for custom proxy headers using a ConfigMap -- [X] [#244](https://github.com/kubernetes/ingress/pull/244) Add information about cors annotation -- [X] [#241](https://github.com/kubernetes/ingress/pull/241) correct a spell mistake -- [X] [#232](https://github.com/kubernetes/ingress/pull/232) Change searchs with searches -- [X] [#231](https://github.com/kubernetes/ingress/pull/231) Add information about proxy_protocol in port 442 -- [X] [#228](https://github.com/kubernetes/ingress/pull/228) Fix worker check issue -- [X] [#227](https://github.com/kubernetes/ingress/pull/227) proxy_protocol on ssl_passthrough listener -- [X] [#223](https://github.com/kubernetes/ingress/pull/223) Fix panic if a tempfile cannot be created -- [X] [#220](https://github.com/kubernetes/ingress/pull/220) Fixes for minikube usage instructions. -- [X] [#219](https://github.com/kubernetes/ingress/pull/219) Fix typo, add a couple of links. -- [X] [#218](https://github.com/kubernetes/ingress/pull/218) Improve links from CONTRIBUTING. -- [X] [#217](https://github.com/kubernetes/ingress/pull/217) Fix an e2e link.
-- [X] [#212](https://github.com/kubernetes/ingress/pull/212) Simplify code to obtain TCP or UDP services -- [X] [#208](https://github.com/kubernetes/ingress/pull/208) Fix nil HTTP field -- [X] [#198](https://github.com/kubernetes/ingress/pull/198) Add an example for static-ip and deployment - +_Changes:_ + +- [x] [#300](https://github.com/kubernetes/ingress/pull/300) Change nginx variable to use in filter of access_log +- [x] [#296](https://github.com/kubernetes/ingress/pull/296) Fix rewrite regex to match the start of the URL and not a substring +- [x] [#293](https://github.com/kubernetes/ingress/pull/293) Update makefile gcloud docker command +- [x] [#290](https://github.com/kubernetes/ingress/pull/290) Update nginx version in ingress controller to 1.11.10 +- [x] [#286](https://github.com/kubernetes/ingress/pull/286) Add logs to help debugging and simplify default upstream configuration +- [x] [#285](https://github.com/kubernetes/ingress/pull/285) Added a Node StoreLister type +- [x] [#281](https://github.com/kubernetes/ingress/pull/281) Add chmod up directory tree for world read/execute on directories +- [x] [#279](https://github.com/kubernetes/ingress/pull/279) fix wrong link in the file of examples/README.md +- [x] [#275](https://github.com/kubernetes/ingress/pull/275) Pass headers to custom error backend +- [x] [#272](https://github.com/kubernetes/ingress/pull/272) Fix error getting class information from Ingress annotations +- [x] [#268](https://github.com/kubernetes/ingress/pull/268) minor: Fix typo in nginx README +- [x] [#265](https://github.com/kubernetes/ingress/pull/265) Fix rewrite annotation parser +- [x] [#262](https://github.com/kubernetes/ingress/pull/262) Add nginx README and configuration docs back +- [x] [#261](https://github.com/kubernetes/ingress/pull/261) types.go: fix typo in godoc +- [x] [#258](https://github.com/kubernetes/ingress/pull/258) Nginx sticky annotations +- [x] [#255](https://github.com/kubernetes/ingress/pull/255) Adds support for disabling access_log globally +- [x] [#247](https://github.com/kubernetes/ingress/pull/247) Fix wrong URL in nginx ingress configuration +- [x] [#246](https://github.com/kubernetes/ingress/pull/246) Add support for custom proxy headers using a ConfigMap +- [x] [#244](https://github.com/kubernetes/ingress/pull/244) Add information about cors annotation +- [x] [#241](https://github.com/kubernetes/ingress/pull/241) correct a spell mistake +- [x] [#232](https://github.com/kubernetes/ingress/pull/232) Change searchs with searches +- [x] [#231](https://github.com/kubernetes/ingress/pull/231) Add information about proxy_protocol in port 442 +- [x] [#228](https://github.com/kubernetes/ingress/pull/228) Fix worker check issue +- [x] [#227](https://github.com/kubernetes/ingress/pull/227) proxy_protocol on ssl_passthrough listener +- [x] [#223](https://github.com/kubernetes/ingress/pull/223) Fix panic if a tempfile cannot be created +- [x] [#220](https://github.com/kubernetes/ingress/pull/220) Fixes for minikube usage instructions. +- [x] [#219](https://github.com/kubernetes/ingress/pull/219) Fix typo, add a couple of links. +- [x] [#218](https://github.com/kubernetes/ingress/pull/218) Improve links from CONTRIBUTING. 
+- [x] [#217](https://github.com/kubernetes/ingress/pull/217) Fix an e2e link. +- [x] [#212](https://github.com/kubernetes/ingress/pull/212) Simplify code to obtain TCP or UDP services +- [x] [#208](https://github.com/kubernetes/ingress/pull/208) Fix nil HTTP field +- [x] [#198](https://github.com/kubernetes/ingress/pull/198) Add an example for static-ip and deployment ### 0.9-beta.1 -**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1` +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1` -*New Features:* +_New Features:_ - SSL Passthrough - New Flag `--publish-service` that sets the Service fronting the ingress controllers @@ -877,163 +2125,162 @@ Fix release 0.9-beta.9 - Custom body sizes per Ingress - Prometheus metrics - -*Breaking changes:* +_Breaking changes:_ - Flag `--nginx-configmap` was replaced with `--configmap` - Configmap field `body-size` was replaced with `proxy-body-size` -*Changes:* - -- [X] [#184](https://github.com/kubernetes/ingress/pull/184) Fix template error -- [X] [#179](https://github.com/kubernetes/ingress/pull/179) Allows the usage of Default SSL Cert -- [X] [#178](https://github.com/kubernetes/ingress/pull/178) Add initialization of proxy variable -- [X] [#177](https://github.com/kubernetes/ingress/pull/177) Refactoring sysctlFSFileMax helper -- [X] [#176](https://github.com/kubernetes/ingress/pull/176) Fix TLS does not get updated when changed -- [X] [#174](https://github.com/kubernetes/ingress/pull/174) Update nginx to 1.11.9 -- [X] [#172](https://github.com/kubernetes/ingress/pull/172) add some unit test cases for some packages under folder "core.pkg.ingress" -- [X] [#168](https://github.com/kubernetes/ingress/pull/168) Changes the SSL Temp file to something inside the same SSL Directory -- [X] [#165](https://github.com/kubernetes/ingress/pull/165) Fix rate limit issue when more than 2 servers enabled in ingress -- [X] [#161](https://github.com/kubernetes/ingress/pull/161) Document some missing parameters and their defaults for NGINX controller -- [X] [#158](https://github.com/kubernetes/ingress/pull/158) prefect unit test cases for annotation.proxy -- [X] [#156](https://github.com/kubernetes/ingress/pull/156) Fix issue for ratelimit -- [X] [#154](https://github.com/kubernetes/ingress/pull/154) add unit test cases for core.pkg.ingress.annotations.cors -- [X] [#151](https://github.com/kubernetes/ingress/pull/151) Port in redirect -- [X] [#150](https://github.com/kubernetes/ingress/pull/150) Add support for custom header sizes -- [X] [#149](https://github.com/kubernetes/ingress/pull/149) Add flag to allow switch off the update of Ingress status -- [X] [#148](https://github.com/kubernetes/ingress/pull/148) Add annotation to allow custom body sizes -- [X] [#145](https://github.com/kubernetes/ingress/pull/145) fix wrong links and punctuations -- [X] [#144](https://github.com/kubernetes/ingress/pull/144) add unit test cases for core.pkg.k8s -- [X] [#143](https://github.com/kubernetes/ingress/pull/143) Use protobuf instead of rest to connect to apiserver host and add troubleshooting doc -- [X] [#142](https://github.com/kubernetes/ingress/pull/142) Use system fs.max-files as limits instead of hard-coded value -- [X] [#141](https://github.com/kubernetes/ingress/pull/141) Add reuse port and backlog to port 80 and 443
-- [X] [#138](https://github.com/kubernetes/ingress/pull/138) reference to const -- [X] [#136](https://github.com/kubernetes/ingress/pull/136) Add content and descriptions about nginx's configuration -- [X] [#135](https://github.com/kubernetes/ingress/pull/135) correct improper punctuation -- [X] [#134](https://github.com/kubernetes/ingress/pull/134) fix typo -- [X] [#133](https://github.com/kubernetes/ingress/pull/133) Add TCP and UDP services removed in migration -- [X] [#132](https://github.com/kubernetes/ingress/pull/132) Document nginx controller configuration tweaks -- [X] [#128](https://github.com/kubernetes/ingress/pull/128) Add tests and godebug to compare structs -- [X] [#126](https://github.com/kubernetes/ingress/pull/126) change the type of imagePullPolicy -- [X] [#123](https://github.com/kubernetes/ingress/pull/123) Add resolver configuration to nginx -- [X] [#119](https://github.com/kubernetes/ingress/pull/119) add unit test case for annotations.service -- [X] [#115](https://github.com/kubernetes/ingress/pull/115) add default_server to listen statement for default backend -- [X] [#114](https://github.com/kubernetes/ingress/pull/114) fix typo -- [X] [#113](https://github.com/kubernetes/ingress/pull/113) Add condition of enqueue and unit test cases for task.Queue -- [X] [#108](https://github.com/kubernetes/ingress/pull/108) annotations: print error and skip if malformed -- [X] [#107](https://github.com/kubernetes/ingress/pull/107) fix some wrong links of examples which to be used for nginx -- [X] [#103](https://github.com/kubernetes/ingress/pull/103) Update the nginx controller manifests -- [X] [#101](https://github.com/kubernetes/ingress/pull/101) Add unit test for strings.StringInSlice -- [X] [#99](https://github.com/kubernetes/ingress/pull/99) Update nginx to 1.11.8 -- [X] [#97](https://github.com/kubernetes/ingress/pull/97) Fix gofmt -- [X] [#96](https://github.com/kubernetes/ingress/pull/96) Fix typo PassthrougBackends -> PassthroughBackends -- [X] [#95](https://github.com/kubernetes/ingress/pull/95) Deny location mapping in case of specific errors -- [X] [#94](https://github.com/kubernetes/ingress/pull/94) Add support to disable server_tokens directive -- [X] [#93](https://github.com/kubernetes/ingress/pull/93) Fix sort for catch all server -- [X] [#92](https://github.com/kubernetes/ingress/pull/92) Refactoring of nginx configuration deserialization -- [X] [#91](https://github.com/kubernetes/ingress/pull/91) Fix x-forwarded-port mapping -- [X] [#90](https://github.com/kubernetes/ingress/pull/90) fix the wrong link to build/test/release -- [X] [#89](https://github.com/kubernetes/ingress/pull/89) fix the wrong links to the examples and developer documentation -- [X] [#88](https://github.com/kubernetes/ingress/pull/88) Fix multiple tls hosts sharing the same secretName -- [X] [#86](https://github.com/kubernetes/ingress/pull/86) Update X-Forwarded-Port -- [X] [#82](https://github.com/kubernetes/ingress/pull/82) Fix incorrect X-Forwarded-Port for TLS -- [X] [#81](https://github.com/kubernetes/ingress/pull/81) Do not push containers to remote repo as part of test-e2e -- [X] [#78](https://github.com/kubernetes/ingress/pull/78) Fix #76: hardcode X-Forwarded-Port due to SSL Passthrough -- [X] 
[#77](https://github.com/kubernetes/ingress/pull/77) Add support for IPV6 in dns resolvers -- [X] [#66](https://github.com/kubernetes/ingress/pull/66) Start FAQ docs -- [X] [#65](https://github.com/kubernetes/ingress/pull/65) Support hostnames in Ingress status -- [X] [#64](https://github.com/kubernetes/ingress/pull/64) Sort whitelist list to avoid random orders -- [X] [#62](https://github.com/kubernetes/ingress/pull/62) Fix e2e make targets -- [X] [#61](https://github.com/kubernetes/ingress/pull/61) Ignore coverage profile files -- [X] [#58](https://github.com/kubernetes/ingress/pull/58) Fix "invalid port in upstream" on nginx controller -- [X] [#57](https://github.com/kubernetes/ingress/pull/57) Fix invalid port in upstream -- [X] [#54](https://github.com/kubernetes/ingress/pull/54) Expand developer docs -- [X] [#52](https://github.com/kubernetes/ingress/pull/52) fix typo in variable ProxyRealIPCIDR -- [X] [#44](https://github.com/kubernetes/ingress/pull/44) Bump nginx version to one higher than that in contrib -- [X] [#36](https://github.com/kubernetes/ingress/pull/36) Add nginx metrics to prometheus -- [X] [#34](https://github.com/kubernetes/ingress/pull/34) nginx: also listen on ipv6 -- [X] [#32](https://github.com/kubernetes/ingress/pull/32) Restart nginx if master process dies -- [X] [#31](https://github.com/kubernetes/ingress/pull/31) Add healthz checker -- [X] [#25](https://github.com/kubernetes/ingress/pull/25) Fix a data race in TestFileWatcher -- [X] [#12](https://github.com/kubernetes/ingress/pull/12) Split implementations from generic code -- [X] [#10](https://github.com/kubernetes/ingress/pull/10) Copy Ingress history from kubernetes/contrib -- [X] [#1498](https://github.com/kubernetes/contrib/pull/1498) Refactoring of template handling -- [X] [#1571](https://github.com/kubernetes/contrib/pull/1571) use POD_NAMESPACE as a namespace in cli parameters -- [X] [#1591](https://github.com/kubernetes/contrib/pull/1591) Always listen on port 443, even without ingress rules -- [X] [#1596](https://github.com/kubernetes/contrib/pull/1596) Adapt nginx hash sizes to the number of ingress -- [X] [#1653](https://github.com/kubernetes/contrib/pull/1653) Update image version -- [X] [#1672](https://github.com/kubernetes/contrib/pull/1672) Add firewall rules and ing class clarifications -- [X] [#1711](https://github.com/kubernetes/contrib/pull/1711) Add function helpers to nginx template -- [X] [#1743](https://github.com/kubernetes/contrib/pull/1743) Allow customisation of the nginx proxy_buffer_size directive via ConfigMap -- [X] [#1749](https://github.com/kubernetes/contrib/pull/1749) Readiness probe that works behind a CP lb -- [X] [#1751](https://github.com/kubernetes/contrib/pull/1751) Add the name of the upstream in the log -- [X] [#1758](https://github.com/kubernetes/contrib/pull/1758) Update nginx to 1.11.4 -- [X] [#1759](https://github.com/kubernetes/contrib/pull/1759) Add support for default backend in Ingress rule -- [X] [#1762](https://github.com/kubernetes/contrib/pull/1762) Add cloud detection -- [X] [#1766](https://github.com/kubernetes/contrib/pull/1766) Clarify the controller uses endpoints and not services -- [X] [#1767](https://github.com/kubernetes/contrib/pull/1767) Update godeps -- [X] 
[#1772](https://github.com/kubernetes/contrib/pull/1772) Avoid replacing nginx.conf file if the new configuration is invalid -- [X] [#1773](https://github.com/kubernetes/contrib/pull/1773) Add annotation to add CORS support -- [X] [#1786](https://github.com/kubernetes/contrib/pull/1786) Add docs about go template -- [X] [#1796](https://github.com/kubernetes/contrib/pull/1796) Add external authentication support using auth_request -- [X] [#1802](https://github.com/kubernetes/contrib/pull/1802) Initialize proxy_upstream_name variable -- [X] [#1806](https://github.com/kubernetes/contrib/pull/1806) Add docs about the log format -- [X] [#1808](https://github.com/kubernetes/contrib/pull/1808) WebSocket documentation -- [X] [#1847](https://github.com/kubernetes/contrib/pull/1847) Change structure of packages -- [X] Add annotation for custom upstream timeouts -- [X] Mutual TLS auth (https://github.com/kubernetes/contrib/issues/1870) +_Changes:_ + +- [x] [#184](https://github.com/kubernetes/ingress/pull/184) Fix template error +- [x] [#179](https://github.com/kubernetes/ingress/pull/179) Allows the usage of Default SSL Cert +- [x] [#178](https://github.com/kubernetes/ingress/pull/178) Add initialization of proxy variable +- [x] [#177](https://github.com/kubernetes/ingress/pull/177) Refactoring sysctlFSFileMax helper +- [x] [#176](https://github.com/kubernetes/ingress/pull/176) Fix TLS does not get updated when changed +- [x] [#174](https://github.com/kubernetes/ingress/pull/174) Update nginx to 1.11.9 +- [x] [#172](https://github.com/kubernetes/ingress/pull/172) add some unit test cases for some packages under folder "core.pkg.ingress" +- [x] [#168](https://github.com/kubernetes/ingress/pull/168) Changes the SSL Temp file to something inside the same SSL Directory +- [x] [#165](https://github.com/kubernetes/ingress/pull/165) Fix rate limit issue when more than 2 servers enabled in ingress +- [x] [#161](https://github.com/kubernetes/ingress/pull/161) Document some missing parameters and their defaults for NGINX controller +- [x] [#158](https://github.com/kubernetes/ingress/pull/158) perfect unit test cases for annotation.proxy +- [x] [#156](https://github.com/kubernetes/ingress/pull/156) Fix issue for ratelimit +- [x] [#154](https://github.com/kubernetes/ingress/pull/154) add unit test cases for core.pkg.ingress.annotations.cors +- [x] [#151](https://github.com/kubernetes/ingress/pull/151) Port in redirect +- [x] [#150](https://github.com/kubernetes/ingress/pull/150) Add support for custom header sizes +- [x] [#149](https://github.com/kubernetes/ingress/pull/149) Add flag to allow switch off the update of Ingress status +- [x] [#148](https://github.com/kubernetes/ingress/pull/148) Add annotation to allow custom body sizes +- [x] [#145](https://github.com/kubernetes/ingress/pull/145) fix wrong links and punctuations +- [x] [#144](https://github.com/kubernetes/ingress/pull/144) add unit test cases for core.pkg.k8s +- [x] [#143](https://github.com/kubernetes/ingress/pull/143) Use protobuf instead of rest to connect to apiserver host and add troubleshooting doc +- [x] [#142](https://github.com/kubernetes/ingress/pull/142) Use system fs.max-files as limits instead of hard-coded value +- [x] [#141](https://github.com/kubernetes/ingress/pull/141) Add reuse port
and backlog to port 80 and 443 +- [x] [#138](https://github.com/kubernetes/ingress/pull/138) reference to const +- [x] [#136](https://github.com/kubernetes/ingress/pull/136) Add content and descriptions about nginx's configuration +- [x] [#135](https://github.com/kubernetes/ingress/pull/135) correct improper punctuation +- [x] [#134](https://github.com/kubernetes/ingress/pull/134) fix typo +- [x] [#133](https://github.com/kubernetes/ingress/pull/133) Add TCP and UDP services removed in migration +- [x] [#132](https://github.com/kubernetes/ingress/pull/132) Document nginx controller configuration tweaks +- [x] [#128](https://github.com/kubernetes/ingress/pull/128) Add tests and godebug to compare structs +- [x] [#126](https://github.com/kubernetes/ingress/pull/126) change the type of imagePullPolicy +- [x] [#123](https://github.com/kubernetes/ingress/pull/123) Add resolver configuration to nginx +- [x] [#119](https://github.com/kubernetes/ingress/pull/119) add unit test case for annotations.service +- [x] [#115](https://github.com/kubernetes/ingress/pull/115) add default_server to listen statement for default backend +- [x] [#114](https://github.com/kubernetes/ingress/pull/114) fix typo +- [x] [#113](https://github.com/kubernetes/ingress/pull/113) Add condition of enqueue and unit test cases for task.Queue +- [x] [#108](https://github.com/kubernetes/ingress/pull/108) annotations: print error and skip if malformed +- [x] [#107](https://github.com/kubernetes/ingress/pull/107) fix some wrong links of examples which to be used for nginx +- [x] [#103](https://github.com/kubernetes/ingress/pull/103) Update the nginx controller manifests +- [x] [#101](https://github.com/kubernetes/ingress/pull/101) Add unit test for strings.StringInSlice +- [x] [#99](https://github.com/kubernetes/ingress/pull/99) Update nginx to 1.11.8 +- [x] [#97](https://github.com/kubernetes/ingress/pull/97) Fix gofmt +- [x] [#96](https://github.com/kubernetes/ingress/pull/96) Fix typo PassthrougBackends -> PassthroughBackends +- [x] [#95](https://github.com/kubernetes/ingress/pull/95) Deny location mapping in case of specific errors +- [x] [#94](https://github.com/kubernetes/ingress/pull/94) Add support to disable server_tokens directive +- [x] [#93](https://github.com/kubernetes/ingress/pull/93) Fix sort for catch all server +- [x] [#92](https://github.com/kubernetes/ingress/pull/92) Refactoring of nginx configuration deserialization +- [x] [#91](https://github.com/kubernetes/ingress/pull/91) Fix x-forwarded-port mapping +- [x] [#90](https://github.com/kubernetes/ingress/pull/90) fix the wrong link to build/test/release +- [x] [#89](https://github.com/kubernetes/ingress/pull/89) fix the wrong links to the examples and developer documentation +- [x] [#88](https://github.com/kubernetes/ingress/pull/88) Fix multiple tls hosts sharing the same secretName +- [x] [#86](https://github.com/kubernetes/ingress/pull/86) Update X-Forwarded-Port +- [x] [#82](https://github.com/kubernetes/ingress/pull/82) Fix incorrect X-Forwarded-Port for TLS +- [x] [#81](https://github.com/kubernetes/ingress/pull/81) Do not push containers to remote repo as part of test-e2e +- [x] [#78](https://github.com/kubernetes/ingress/pull/78) Fix #76: hardcode X-Forwarded-Port due to SSL Passthrough +- [x] 
[#77](https://github.com/kubernetes/ingress/pull/77) Add support for IPV6 in dns resolvers +- [x] [#66](https://github.com/kubernetes/ingress/pull/66) Start FAQ docs +- [x] [#65](https://github.com/kubernetes/ingress/pull/65) Support hostnames in Ingress status +- [x] [#64](https://github.com/kubernetes/ingress/pull/64) Sort whitelist list to avoid random orders +- [x] [#62](https://github.com/kubernetes/ingress/pull/62) Fix e2e make targets +- [x] [#61](https://github.com/kubernetes/ingress/pull/61) Ignore coverage profile files +- [x] [#58](https://github.com/kubernetes/ingress/pull/58) Fix "invalid port in upstream" on nginx controller +- [x] [#57](https://github.com/kubernetes/ingress/pull/57) Fix invalid port in upstream +- [x] [#54](https://github.com/kubernetes/ingress/pull/54) Expand developer docs +- [x] [#52](https://github.com/kubernetes/ingress/pull/52) fix typo in variable ProxyRealIPCIDR +- [x] [#44](https://github.com/kubernetes/ingress/pull/44) Bump nginx version to one higher than that in contrib +- [x] [#36](https://github.com/kubernetes/ingress/pull/36) Add nginx metrics to prometheus +- [x] [#34](https://github.com/kubernetes/ingress/pull/34) nginx: also listen on ipv6 +- [x] [#32](https://github.com/kubernetes/ingress/pull/32) Restart nginx if master process dies +- [x] [#31](https://github.com/kubernetes/ingress/pull/31) Add healthz checker +- [x] [#25](https://github.com/kubernetes/ingress/pull/25) Fix a data race in TestFileWatcher +- [x] [#12](https://github.com/kubernetes/ingress/pull/12) Split implementations from generic code +- [x] [#10](https://github.com/kubernetes/ingress/pull/10) Copy Ingress history from kubernetes/contrib +- [x] [#1498](https://github.com/kubernetes/contrib/pull/1498) Refactoring of template handling +- [x] [#1571](https://github.com/kubernetes/contrib/pull/1571) use POD_NAMESPACE as a namespace in cli parameters +- [x] [#1591](https://github.com/kubernetes/contrib/pull/1591) Always listen on port 443, even without ingress rules +- [x] [#1596](https://github.com/kubernetes/contrib/pull/1596) Adapt nginx hash sizes to the number of ingress +- [x] [#1653](https://github.com/kubernetes/contrib/pull/1653) Update image version +- [x] [#1672](https://github.com/kubernetes/contrib/pull/1672) Add firewall rules and ing class clarifications +- [x] [#1711](https://github.com/kubernetes/contrib/pull/1711) Add function helpers to nginx template +- [x] [#1743](https://github.com/kubernetes/contrib/pull/1743) Allow customisation of the nginx proxy_buffer_size directive via ConfigMap +- [x] [#1749](https://github.com/kubernetes/contrib/pull/1749) Readiness probe that works behind a CP lb +- [x] [#1751](https://github.com/kubernetes/contrib/pull/1751) Add the name of the upstream in the log +- [x] [#1758](https://github.com/kubernetes/contrib/pull/1758) Update nginx to 1.11.4 +- [x] [#1759](https://github.com/kubernetes/contrib/pull/1759) Add support for default backend in Ingress rule +- [x] [#1762](https://github.com/kubernetes/contrib/pull/1762) Add cloud detection +- [x] [#1766](https://github.com/kubernetes/contrib/pull/1766) Clarify the controller uses endpoints and not services +- [x] [#1767](https://github.com/kubernetes/contrib/pull/1767) Update godeps +- [x] 
[#1772](https://github.com/kubernetes/contrib/pull/1772) Avoid replacing nginx.conf file if the new configuration is invalid +- [x] [#1773](https://github.com/kubernetes/contrib/pull/1773) Add annotation to add CORS support +- [x] [#1786](https://github.com/kubernetes/contrib/pull/1786) Add docs about go template +- [x] [#1796](https://github.com/kubernetes/contrib/pull/1796) Add external authentication support using auth_request +- [x] [#1802](https://github.com/kubernetes/contrib/pull/1802) Initialize proxy_upstream_name variable +- [x] [#1806](https://github.com/kubernetes/contrib/pull/1806) Add docs about the log format +- [x] [#1808](https://github.com/kubernetes/contrib/pull/1808) WebSocket documentation +- [x] [#1847](https://github.com/kubernetes/contrib/pull/1847) Change structure of packages +- [x] Add annotation for custom upstream timeouts +- [x] Mutual TLS auth (https://github.com/kubernetes/contrib/issues/1870) ### 0.8.3 -- [X] [#1450](https://github.com/kubernetes/contrib/pull/1450) Check for errors in nginx template +- [x] [#1450](https://github.com/kubernetes/contrib/pull/1450) Check for errors in nginx template - [ ] [#1498](https://github.com/kubernetes/contrib/pull/1498) Refactoring of template handling -- [X] [#1467](https://github.com/kubernetes/contrib/pull/1467) Use ClientConfig to configure connection -- [X] [#1575](https://github.com/kubernetes/contrib/pull/1575) Update nginx to 1.11.3 +- [x] [#1467](https://github.com/kubernetes/contrib/pull/1467) Use ClientConfig to configure connection +- [x] [#1575](https://github.com/kubernetes/contrib/pull/1575) Update nginx to 1.11.3 ### 0.8.2 -- [X] [#1336](https://github.com/kubernetes/contrib/pull/1336) Add annotation to skip ingress rule -- [X] [#1338](https://github.com/kubernetes/contrib/pull/1338) Add HTTPS default backend -- [X] [#1351](https://github.com/kubernetes/contrib/pull/1351) Avoid generation of invalid ssl certificates -- [X] [#1379](https://github.com/kubernetes/contrib/pull/1379) improve nginx performance -- [X] [#1350](https://github.com/kubernetes/contrib/pull/1350) Improve performance (listen backlog=net.core.somaxconn) -- [X] [#1384](https://github.com/kubernetes/contrib/pull/1384) Unset Authorization header when proxying -- [X] [#1398](https://github.com/kubernetes/contrib/pull/1398) Mitigate HTTPoxy Vulnerability +- [x] [#1336](https://github.com/kubernetes/contrib/pull/1336) Add annotation to skip ingress rule +- [x] [#1338](https://github.com/kubernetes/contrib/pull/1338) Add HTTPS default backend +- [x] [#1351](https://github.com/kubernetes/contrib/pull/1351) Avoid generation of invalid ssl certificates +- [x] [#1379](https://github.com/kubernetes/contrib/pull/1379) improve nginx performance +- [x] [#1350](https://github.com/kubernetes/contrib/pull/1350) Improve performance (listen backlog=net.core.somaxconn) +- [x] [#1384](https://github.com/kubernetes/contrib/pull/1384) Unset Authorization header when proxying +- [x] [#1398](https://github.com/kubernetes/contrib/pull/1398) Mitigate HTTPoxy Vulnerability ### 0.8.1 -- [X] [#1317](https://github.com/kubernetes/contrib/pull/1317) Fix duplicated real_ip_header -- [X] [#1315](https://github.com/kubernetes/contrib/pull/1315) Addresses #1314 +- [x] 
[#1317](https://github.com/kubernetes/contrib/pull/1317) Fix duplicated real_ip_header +- [x] [#1315](https://github.com/kubernetes/contrib/pull/1315) Addresses #1314 ### 0.8 -- [X] [#1063](https://github.com/kubernetes/contrib/pull/1063) watches referenced tls secrets -- [X] [#850](https://github.com/kubernetes/contrib/pull/850) adds configurable SSL redirect nginx controller -- [X] [#1136](https://github.com/kubernetes/contrib/pull/1136) Fix nginx rewrite rule order -- [X] [#1144](https://github.com/kubernetes/contrib/pull/1144) Add cidr whitelist support -- [X] [#1230](https://github.com/kubernetes/contrib/pull/1130) Improve docs and examples -- [X] [#1258](https://github.com/kubernetes/contrib/pull/1258) Avoid sync without a reachable -- [X] [#1235](https://github.com/kubernetes/contrib/pull/1235) Fix stats by country in nginx status page -- [X] [#1236](https://github.com/kubernetes/contrib/pull/1236) Update nginx to add dynamic TLS records and spdy -- [X] [#1238](https://github.com/kubernetes/contrib/pull/1238) Add support for dynamic TLS records and spdy -- [X] [#1239](https://github.com/kubernetes/contrib/pull/1239) Add support for conditional log of urls -- [X] [#1253](https://github.com/kubernetes/contrib/pull/1253) Use delayed queue -- [X] [#1296](https://github.com/kubernetes/contrib/pull/1296) Fix formatting -- [X] [#1299](https://github.com/kubernetes/contrib/pull/1299) Fix formatting +- [x] [#1063](https://github.com/kubernetes/contrib/pull/1063) watches referenced tls secrets +- [x] [#850](https://github.com/kubernetes/contrib/pull/850) adds configurable SSL redirect nginx controller +- [x] [#1136](https://github.com/kubernetes/contrib/pull/1136) Fix nginx rewrite rule order +- [x] [#1144](https://github.com/kubernetes/contrib/pull/1144) Add cidr whitelist support +- [x] [#1230](https://github.com/kubernetes/contrib/pull/1230) Improve docs and examples +- [x] [#1258](https://github.com/kubernetes/contrib/pull/1258) Avoid sync without a reachable +- [x] [#1235](https://github.com/kubernetes/contrib/pull/1235) Fix stats by country in nginx status page +- [x] [#1236](https://github.com/kubernetes/contrib/pull/1236) Update nginx to add dynamic TLS records and spdy +- [x] [#1238](https://github.com/kubernetes/contrib/pull/1238) Add support for dynamic TLS records and spdy +- [x] [#1239](https://github.com/kubernetes/contrib/pull/1239) Add support for conditional log of urls +- [x] [#1253](https://github.com/kubernetes/contrib/pull/1253) Use delayed queue +- [x] [#1296](https://github.com/kubernetes/contrib/pull/1296) Fix formatting +- [x] [#1299](https://github.com/kubernetes/contrib/pull/1299) Fix formatting ### 0.7 -- [X] [#898](https://github.com/kubernetes/contrib/pull/898) reorder locations.
Location / must be the last one to avoid errors routing to subroutes -- [X] [#946](https://github.com/kubernetes/contrib/pull/946) Add custom authentication (Basic or Digest) to ingress rules -- [X] [#926](https://github.com/kubernetes/contrib/pull/926) Custom errors should be optional -- [X] [#1002](https://github.com/kubernetes/contrib/pull/1002) Use k8s probes (disable NGINX checks) -- [X] [#962](https://github.com/kubernetes/contrib/pull/962) Make optional http2 -- [X] [#1054](https://github.com/kubernetes/contrib/pull/1054) force reload if some certificate change -- [X] [#958](https://github.com/kubernetes/contrib/pull/958) update NGINX to 1.11.0 and add digest module -- [X] [#960](https://github.com/kubernetes/contrib/issues/960) https://trac.nginx.org/nginx/changeset/ce94f07d50826fcc8d48f046fe19d59329420fdb/nginx -- [X] [#1057](https://github.com/kubernetes/contrib/pull/1057) Remove loadBalancer ip on shutdown -- [X] [#1079](https://github.com/kubernetes/contrib/pull/1079) path rewrite -- [X] [#1093](https://github.com/kubernetes/contrib/pull/1093) rate limiting -- [X] [#1102](https://github.com/kubernetes/contrib/pull/1102) geolocation of traffic in stats -- [X] [#884](https://github.com/kubernetes/contrib/issues/884) support services running ssl -- [X] [#930](https://github.com/kubernetes/contrib/issues/930) detect changes in configuration configmaps +- [x] [#898](https://github.com/kubernetes/contrib/pull/898) reorder locations. Location / must be the last one to avoid errors routing to subroutes +- [x] [#946](https://github.com/kubernetes/contrib/pull/946) Add custom authentication (Basic or Digest) to ingress rules +- [x] [#926](https://github.com/kubernetes/contrib/pull/926) Custom errors should be optional +- [x] [#1002](https://github.com/kubernetes/contrib/pull/1002) Use k8s probes (disable NGINX checks) +- [x] [#962](https://github.com/kubernetes/contrib/pull/962) Make optional http2 +- [x] [#1054](https://github.com/kubernetes/contrib/pull/1054) force reload if some certificate change +- [x] [#958](https://github.com/kubernetes/contrib/pull/958) update NGINX to 1.11.0 and add digest module +- [x] [#960](https://github.com/kubernetes/contrib/issues/960) https://trac.nginx.org/nginx/changeset/ce94f07d50826fcc8d48f046fe19d59329420fdb/nginx +- [x] [#1057](https://github.com/kubernetes/contrib/pull/1057) Remove loadBalancer ip on shutdown +- [x] [#1079](https://github.com/kubernetes/contrib/pull/1079) path rewrite +- [x] [#1093](https://github.com/kubernetes/contrib/pull/1093) rate limiting +- [x] [#1102](https://github.com/kubernetes/contrib/pull/1102) geolocation of traffic in stats +- [x] [#884](https://github.com/kubernetes/contrib/issues/884) support services running ssl +- [x] [#930](https://github.com/kubernetes/contrib/issues/930) detect changes in configuration configmaps diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index c9d9757ef7..0000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,471 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "cloud.google.com/go" - packages = ["compute/metadata"] - revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613" - version = "v0.16.0" - -[[projects]] - name = "github.com/Azure/go-autorest" - packages = ["autorest","autorest/adal","autorest/azure","autorest/date"] - revision = "ab5671379918d9af294b6f0e3d8aaa98c829416d" - version = "v9.4.0" - -[[projects]] - name = "github.com/PuerkitoBio/purell" - packages = ["."] - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - branch = "master" - name = "github.com/armon/go-proxyproto" - packages = ["."] - revision = "48572f11356f1843b694f21a290d4f1006bc5e47" - -[[projects]] - branch = "master" - name = "github.com/beorn7/perks" - packages = ["quantile"] - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29" - version = "v3.1.0" - -[[projects]] - name = "github.com/docker/distribution" - packages = ["digestset","reference"] - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" - -[[projects]] - branch = "master" - name = "github.com/docker/spdystream" - packages = [".","spdy"] - revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db" - -[[projects]] - name = "github.com/emicklei/go-restful" - packages = [".","log"] - revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c" - version = "v2.4.0" - -[[projects]] - name = "github.com/emicklei/go-restful-swagger12" - packages = ["."] - revision = "dcef7f55730566d41eae5db10e7d6981829720f6" - version = "1.0.1" - -[[projects]] - name = "github.com/fsnotify/fsnotify" - packages = ["."] - revision = "629574ca2a5df945712d3079857300b5e4da0236" - version = "v1.4.2" - -[[projects]] - branch = "master" - name = "github.com/fullsailor/pkcs7" - packages = ["."] - revision = "a009d8d7de53d9503c797cb8ec66fa3b21eed209" - -[[projects]] - name = "github.com/ghodss/yaml" - packages = ["."] - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - revision = "36d33bfe519efae5632669801b180bf1a245da3b" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/spec" - packages = ["."] - revision = "a4fa9574c7aa73b2fc54e251eb9524d0482bb592" - -[[projects]] - branch = "master" - name = "github.com/go-openapi/swag" - packages = ["."] - revision = "cf0bdb963811675a4d7e74901cefc7411a1df939" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = ["gogoproto","proto","protoc-gen-gogo/descriptor","sortkeys"] - revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02" - version = "v0.5" - -[[projects]] - branch = "master" - name = "github.com/golang/glog" - packages = ["."] - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - branch = "master" - name = 
"github.com/golang/groupcache" - packages = ["lru"] - revision = "84a468cf14b4376def5d68c722b139b881c450a4" - -[[projects]] - branch = "master" - name = "github.com/golang/protobuf" - packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] - revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - -[[projects]] - branch = "master" - name = "github.com/google/btree" - packages = ["."] - revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435" - -[[projects]] - branch = "master" - name = "github.com/google/gofuzz" - packages = ["."] - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - name = "github.com/googleapis/gnostic" - packages = ["OpenAPIv2","compiler","extensions"] - revision = "ee43cbb60db7bd22502942cccbc39059117352ab" - version = "v0.1.0" - -[[projects]] - branch = "master" - name = "github.com/gophercloud/gophercloud" - packages = [".","openstack","openstack/identity/v2/tenants","openstack/identity/v2/tokens","openstack/identity/v3/tokens","openstack/utils","pagination"] - revision = "0b6b13c4dd9e07a89f83cbe4617c13ad646d6362" - -[[projects]] - branch = "master" - name = "github.com/gregjones/httpcache" - packages = [".","diskcache"] - revision = "22a0b1feae53974ed4cfe27bcce70dba061cc5fd" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/golang-lru" - packages = [".","simplelru"] - revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" - -[[projects]] - branch = "master" - name = "github.com/howeyc/gopass" - packages = ["."] - revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" - -[[projects]] - name = "github.com/imdario/mergo" - packages = ["."] - revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874" - version = "0.2.4" - -[[projects]] - name = "github.com/json-iterator/go" - packages = ["."] - revision = "6240e1e7983a85228f7fd9c3e1b6932d46ec58e2" - version = "1.0.3" - -[[projects]] - branch = "master" - name = "github.com/juju/ratelimit" - packages = ["."] - revision = "59fac5042749a5afb9af70e813da1dd5474f0167" - -[[projects]] - name = "github.com/kr/pty" - packages = ["."] - revision = "95d05c1eef33a45bd58676b6ce28d105839b8d0b" - version = "v1.0.1" - -[[projects]] - branch = "master" - name = "github.com/kylelemons/godebug" - packages = ["diff","pretty"] - revision = "d65d576e9348f5982d7f6d83682b694e731a45c6" - -[[projects]] - branch = "master" - name = "github.com/mailru/easyjson" - packages = ["buffer","jlexer","jwriter"] - revision = "5f62e4f3afa2f576dc86531b7df4d966b19ef8f8" - -[[projects]] - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-ps" - packages = ["."] - revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "06020f85339e21b2478f756a78e295255ffa4d6a" - -[[projects]] - branch = "master" - name = "github.com/moul/http2curl" - packages = ["."] - revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d" - -[[projects]] - branch = "master" - name = "github.com/ncabatoff/process-exporter" - packages = [".","proc"] - revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e" - source = "github.com/aledbf/process-exporter" - -[[projects]] - name = 
"github.com/onsi/ginkgo" - packages = [".","config","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"] - revision = "9eda700730cba42af70d53180f9dcce9266bc2bc" - version = "v1.4.0" - -[[projects]] - name = "github.com/onsi/gomega" - packages = [".","format","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"] - revision = "c893efa28eb45626cdaa76c9f653b62488858837" - version = "v1.2.0" - -[[projects]] - name = "github.com/opencontainers/go-digest" - packages = ["."] - revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" - version = "v1.0.0-rc1" - -[[projects]] - name = "github.com/parnurzeal/gorequest" - packages = ["."] - revision = "a578a48e8d6ca8b01a3b18314c43c6716bb5f5a3" - version = "v0.2.15" - -[[projects]] - branch = "master" - name = "github.com/paultag/sniff" - packages = ["parser"] - revision = "87325c3dddf408cfb71f5044873d34ac426d5a59" - -[[projects]] - name = "github.com/pborman/uuid" - packages = ["."] - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - branch = "master" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" - -[[projects]] - name = "github.com/peterbourgon/diskv" - packages = ["."] - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/prometheus/client_golang" - packages = ["prometheus","prometheus/promhttp"] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "github.com/prometheus/client_model" - packages = ["go"] - revision = "6f3806018612930941127f2a7c6c453ba2c527d2" - -[[projects]] - branch = "master" - name = "github.com/prometheus/common" - packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"] - revision = "e3fb1a1acd7605367a2b378bc2e2f893c05174b7" - -[[projects]] - branch = "master" - name = "github.com/prometheus/procfs" - packages = [".","xfs"] - revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa" - -[[projects]] - name = "github.com/spf13/afero" - packages = [".","mem"] - revision = "8d919cbe7e2627e417f3e45c3c0e489a5b7e2536" - version = "v1.0.0" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" - version = "v1.0.0" - -[[projects]] - name = "github.com/zakjan/cert-chain-resolver" - packages = ["certUtil"] - revision = "c222fb53d84f5c835aab8027b5d422d4089f9d29" - version = "1.0.1" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - revision = "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = 
["context","context/ctxhttp","html","html/atom","html/charset","http2","http2/hpack","idna","internal/timeseries","lex/httplex","publicsuffix","trace"] - revision = "a337091b0525af65de94df2eb7e98bd9962dcbe2" - -[[projects]] - branch = "master" - name = "golang.org/x/oauth2" - packages = [".","google","internal","jws","jwt"] - revision = "9ff8ebcc8e241d46f52ecc5bff0e5a2f2dbef402" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["unix","windows"] - revision = "1e2299c37cc91a509f1b12369872d27be0ce98a6" - -[[projects]] - branch = "master" - name = "golang.org/x/text" - packages = ["collate","collate/build","encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"] - revision = "88f656faf3f37f690df1a32515b479415e1a6769" - -[[projects]] - name = "google.golang.org/appengine" - packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"] - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "11c7f9e547da6db876260ce49ea7536985904c9b" - -[[projects]] - name = "google.golang.org/grpc" - packages = [".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","stats","status","tap","transport"] - revision = "5ffe3083946d5603a0578721101dc8165b1d5b5f" - version = "v1.7.2" - -[[projects]] - name = "gopkg.in/fsnotify.v1" - packages = ["."] - revision = "629574ca2a5df945712d3079857300b5e4da0236" - version = "v1.4.2" - -[[projects]] - name = "gopkg.in/go-playground/pool.v3" - packages = ["."] - revision = "e73cd3a5ded835540c5cf4778488579c5b357d68" - version = "v3.1.1" - -[[projects]] - name = "gopkg.in/inf.v0" - packages = ["."] - revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - version = "v0.9.0" - -[[projects]] - branch = "v2" - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" - -[[projects]] - branch = "release-1.8" - name = "k8s.io/api" - packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"] - revision = "4df58c811fe2e65feb879227b2b245e4dc26e7ad" - -[[projects]] - branch = "master" - name = "k8s.io/apiextensions-apiserver" - packages = ["pkg/apis/apiextensions","pkg/apis/apiextensions/v1beta1","pkg/client/clientset/clientset","pkg/client/clientset/clientset/scheme","pkg/client/clientset/clientset/typed/apiextensions/v1beta1","pkg/features"] - revision = "51a1910459f074162eb4e25233e461fe91e99405" - -[[projects]] - branch = "release-1.8" - name = "k8s.io/apimachinery" - packages = 
["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/api/validation","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1/validation","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/rand","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/uuid","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"] - revision = "019ae5ada31de202164b118aee88ee2d14075c31" - -[[projects]] - branch = "master" - name = "k8s.io/apiserver" - packages = ["pkg/authentication/authenticator","pkg/authentication/serviceaccount","pkg/authentication/user","pkg/features","pkg/server/healthz","pkg/util/feature","pkg/util/logs"] - revision = "7001bc4df8883d4a0ec84cd4b2117655a0009b6c" - -[[projects]] - name = "k8s.io/client-go" - packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","plugin/pkg/client/auth","plugi
n/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","plugin/pkg/client/auth/openstack","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/leaderelection","tools/leaderelection/resourcelock","tools/metrics","tools/pager","tools/record","tools/reference","tools/remotecommand","transport","transport/spdy","util/cert","util/cert/triple","util/exec","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/retry","util/workqueue"] - revision = "2ae454230481a7cb5544325e12ad7658ecccd19b" - version = "v5.0.1" - -[[projects]] - branch = "master" - name = "k8s.io/kube-openapi" - packages = ["pkg/common"] - revision = "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - -[[projects]] - name = "k8s.io/kubernetes" - packages = ["pkg/api","pkg/api/helper","pkg/api/install","pkg/api/service","pkg/api/util","pkg/api/v1","pkg/api/v1/helper","pkg/api/v1/pod","pkg/api/validation","pkg/apis/extensions","pkg/apis/networking","pkg/capabilities","pkg/cloudprovider","pkg/controller","pkg/features","pkg/kubelet/apis","pkg/kubelet/apis/cri/v1alpha1/runtime","pkg/kubelet/container","pkg/kubelet/types","pkg/kubelet/util/format","pkg/kubelet/util/ioutils","pkg/kubelet/util/sliceutils","pkg/security/apparmor","pkg/serviceaccount","pkg/util/file","pkg/util/filesystem","pkg/util/hash","pkg/util/io","pkg/util/mount","pkg/util/net/sets","pkg/util/parsers","pkg/util/pointer","pkg/util/sysctl","pkg/util/taints","pkg/volume","pkg/volume/util","third_party/forked/golang/expansion"] - revision = "f0efb3cb883751c5ffdbe6d515f3cb4fbe7b7acd" - version = "v1.8.3" - -[[projects]] - branch = "master" - name = "k8s.io/utils" - packages = ["exec"] - revision = "bf963466fd3fea33c428098b12a89d8ecd012f25" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "cb7f34d9108be87b2601c0d4acbd6faa41bdf211a082760fa1b4d4300bf3e959" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b522ec114c..0000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,110 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -[[override]] - name = "github.com/docker/distribution" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" - -[[constraint]] - name = "github.com/opencontainers/go-digest" - branch = "master" - -[[constraint]] - branch = "master" - name = "github.com/armon/go-proxyproto" - -[[constraint]] - branch = "master" - name = "github.com/golang/glog" - -[[constraint]] - name = "github.com/imdario/mergo" - version = "0.2.4" - -[[constraint]] - branch = "master" - name = "github.com/kylelemons/godebug" - -[[constraint]] - branch = "master" - name = "github.com/mitchellh/go-ps" - -[[constraint]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - -[[constraint]] - name = "github.com/ncabatoff/process-exporter" - source = "github.com/aledbf/process-exporter" - branch = "master" - -[[constraint]] - branch = "master" - name = "github.com/paultag/sniff" - -[[constraint]] - name = "github.com/pborman/uuid" - version = "1.1.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "0.8.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" - -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.0" - -[[constraint]] - name = "github.com/zakjan/cert-chain-resolver" - version = "1.0.1" - -[[constraint]] - name = "gopkg.in/fsnotify.v1" - version = "1.4.2" - -[[constraint]] - name = "gopkg.in/go-playground/pool.v3" - version = "3.1.1" - -[[constraint]] - branch = "master" - name = "k8s.io/apiserver" - -[[constraint]] - name = "k8s.io/api" - branch = "release-1.8" - -[[constraint]] - name = "k8s.io/apimachinery" - branch = "release-1.8" - -[[constraint]] - name = "k8s.io/client-go" - version = "v5.0.1" - -[[constraint]] - name = "k8s.io/kubernetes" - version = "v1.8.3" diff --git a/Makefile b/Makefile index dea74e2b19..184c311a89 100644 --- a/Makefile +++ b/Makefile @@ -15,68 +15,71 @@ .PHONY: all all: all-container -BUILDTAGS= - # Use the 0.0 tag for testing, it shouldn't clobber any release builds -TAG?=0.9.0-beta.19 -REGISTRY?=quay.io/kubernetes-ingress-controller -GOOS?=linux -DOCKER?=gcloud docker -- -SED_I?=sed -i +TAG ?= 0.24.1 +REGISTRY ?= quay.io/kubernetes-ingress-controller +DOCKER ?= docker +SED_I ?= sed -i GOHOSTOS ?= $(shell go env GOHOSTOS) +# e2e settings +# Allow limiting the scope of the e2e tests. 
By default run everything +FOCUS ?= .* +# number of parallel e2e test nodes +E2E_NODES ?= 10 +# flag a test as slow only if it takes > 50s +SLOW_E2E_THRESHOLD ?= 50 +K8S_VERSION ?= v1.14.1 + ifeq ($(GOHOSTOS),darwin) SED_I=sed -i '' endif -REPO_INFO=$(shell git config --get remote.origin.url) - -ifndef COMMIT - COMMIT := git-$(shell git rev-parse --short HEAD) -endif +REPO_INFO ?= $(shell git config --get remote.origin.url) +GIT_COMMIT ?= git-$(shell git rev-parse --short HEAD) -PKG=k8s.io/ingress-nginx +PKG = k8s.io/ingress-nginx ARCH ?= $(shell go env GOARCH) GOARCH = ${ARCH} DUMB_ARCH = ${ARCH} -ALL_ARCH = amd64 arm arm64 ppc64le s390x +GOBUILD_FLAGS := -v + +ALL_ARCH = amd64 arm64 + +QEMUVERSION = v4.0.0 -QEMUVERSION=v2.9.1 +BUSTED_ARGS =-v --pattern=_test + +GOOS = linux + +export ARCH +export DUMB_ARCH +export TAG +export PKG +export GOARCH +export GOOS +export GIT_COMMIT +export GOBUILD_FLAGS +export REPO_INFO +export BUSTED_ARGS IMGNAME = nginx-ingress-controller IMAGE = $(REGISTRY)/$(IMGNAME) MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) # Set default base image dynamically for each arch -BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.30 +BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.87 -ifeq ($(ARCH),arm) - QEMUARCH=arm - GOARCH=arm - DUMB_ARCH=armhf -endif ifeq ($(ARCH),arm64) - QEMUARCH=aarch64 -endif -ifeq ($(ARCH),ppc64le) - QEMUARCH=ppc64le - GOARCH=ppc64le - DUMB_ARCH=ppc64el -endif -ifeq ($(ARCH),s390x) - QEMUARCH=s390x + QEMUARCH=aarch64 endif TEMP_DIR := $(shell mktemp -d) DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile -.PHONY: image-info -image-info: - echo -n '{"image":"$(IMAGE)","tag":"$(TAG)"}' - .PHONY: sub-container-% sub-container-%: $(MAKE) ARCH=$* build container @@ -92,12 +95,16 @@ all-container: $(addprefix sub-container-,$(ALL_ARCH)) all-push: $(addprefix sub-push-,$(ALL_ARCH)) .PHONY: container -container: .container-$(ARCH) +container: clean-container .container-$(ARCH) .PHONY: .container-$(ARCH) .container-$(ARCH): + mkdir -p $(TEMP_DIR)/rootfs + cp bin/$(ARCH)/nginx-ingress-controller $(TEMP_DIR)/rootfs/nginx-ingress-controller + cp bin/$(ARCH)/dbg $(TEMP_DIR)/rootfs/dbg + cp -RP ./* $(TEMP_DIR) - $(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE) + $(SED_I) "s|BASEIMAGE|$(BASEIMAGE)|g" $(DOCKERFILE) $(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE) $(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE) @@ -106,19 +113,26 @@ ifeq ($(ARCH),amd64) $(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE) else # When cross-building, only the placeholder "CROSS_BUILD_" should be removed - # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel - $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) endif - $(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs + @$(DOCKER) build --no-cache --pull -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs ifeq ($(ARCH), amd64) - # This is for to maintain the backward compatibility - $(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) + # This is for maintaining backward compatibility + @$(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) endif +.PHONY: clean-container +clean-container: + @$(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true + +.PHONY: register-qemu +register-qemu: + # Register /usr/bin/qemu-ARCH-static as the handler for binaries of other architectures +
@$(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + .PHONY: push push: .push-$(ARCH) @@ -129,67 +143,111 @@ ifeq ($(ARCH), amd64) $(DOCKER) push $(IMAGE):$(TAG) endif -.PHONY: clean -clean: - $(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true - -.PHONE: code-generator -code-generator: - go-bindata -o internal/file/bindata.go -prefix="rootfs" -pkg=file -ignore=Dockerfile -ignore=".DS_Store" rootfs/... - .PHONY: build -build: clean - CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \ - -ldflags "-s -w -X ${PKG}/version.RELEASE=${TAG} -X ${PKG}/version.COMMIT=${COMMIT} -X ${PKG}/version.REPO=${REPO_INFO}" \ - -o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/cmd/nginx +build: + @build/build.sh + +.PHONY: build-plugin +build-plugin: + @build/build-plugin.sh -.PHONY: fmt -fmt: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c +.PHONY: clean +clean: + rm -rf bin/ .gocache/ -.PHONY: lint -lint: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e') | xargs -L 1 sh -c +.PHONY: static-check +static-check: + @build/static-check.sh .PHONY: test -test: fmt lint vet - @echo "+ $@" - @go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e') +test: + @build/test.sh -.PHONY: e2e-image -e2e-image: sub-container-amd64 - TAG=$(TAG) IMAGE=$(MULTI_ARCH_IMG) docker tag $(IMAGE):$(TAG) $(IMAGE):test - docker images +.PHONY: lua-test +lua-test: + @build/test-lua.sh .PHONY: e2e-test e2e-test: - @go test -o e2e-tests -c ./test/e2e - @KUBECONFIG=${HOME}/.kube/config INGRESSNGINXCONFIG=${HOME}/.kube/config ./e2e-tests + echo "Granting permissions to ingress-nginx e2e service account..." + kubectl create serviceaccount ingress-nginx-e2e || true + kubectl create clusterrolebinding permissive-binding \ + --clusterrole=cluster-admin \ + --user=admin \ + --user=kubelet \ + --serviceaccount=default:ingress-nginx-e2e || true + + until kubectl get secret | grep -q ^ingress-nginx-e2e-token; do \ + echo "waiting for api token"; \ + sleep 3; \ + done + + kubectl run --rm \ + --attach \ + --restart=Never \ + --generator=run-pod/v1 \ + --env="E2E_NODES=$(E2E_NODES)" \ + --env="FOCUS=$(FOCUS)" \ + --env="SLOW_E2E_THRESHOLD=$(SLOW_E2E_THRESHOLD)" \ + --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \ + e2e --image=nginx-ingress-controller:e2e + +.PHONY: e2e-test-image +e2e-test-image: e2e-test-binary + make -C test/e2e-image + +.PHONY: e2e-test-binary +e2e-test-binary: + @ginkgo build ./test/e2e .PHONY: cover cover: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e') | xargs -L 1 sh -c - gover - goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken $$COVERALLS_TOKEN + @build/cover.sh + echo "Uploading coverage results..." + @curl -s https://codecov.io/bash | bash .PHONY: vet vet: - @echo "+ $@" - @go vet $(shell go list ${PKG}/... | grep -v vendor) + @go vet $(shell go list ${PKG}/internal/... 
| grep -v vendor) .PHONY: release release: all-container all-push echo "done" -.PHONY: docker-build -docker-build: all-container - -.PHONY: docker-push -docker-push: all-push - .PHONY: check_dead_links check_dead_links: - docker run -t -v $$PWD:/tmp rubygem/awesome_bot --allow-dupe --allow-redirect $(shell find $$PWD -name "*.md" -mindepth 1 -printf '%P\n' | grep -v vendor | grep -v Changelog.md) + @docker run -t \ + -v $$PWD:/tmp aledbf/awesome_bot:0.1 \ + --allow-dupe \ + --allow-redirect $(shell find $$PWD -mindepth 1 -name "*.md" -printf '%P\n' | grep -v vendor | grep -v Changelog.md) + +.PHONY: dep-ensure +dep-ensure: + GO111MODULE=on go mod tidy -v + find vendor -name '*_test.go' -delete + +.PHONY: dev-env +dev-env: + @build/dev-env.sh + +.PHONY: live-docs +live-docs: + @docker build --pull -t ingress-nginx/mkdocs build/mkdocs + @docker run --rm -it -p 3000:3000 -v ${PWD}:/docs ingress-nginx/mkdocs + +.PHONY: build-docs +build-docs: + @docker build --pull -t ingress-nginx/mkdocs build/mkdocs + @docker run --rm -v ${PWD}:/docs ingress-nginx/mkdocs build + +.PHONY: misspell +misspell: + @go get github.com/client9/misspell/cmd/misspell + misspell \ + -locale US \ + -error \ + cmd/* internal/* deploy/* docs/* design/* test/* README.md + +.PHONY: kind-e2e-test +kind-e2e-test: + test/e2e/run.sh diff --git a/OWNERS b/OWNERS index a2c1ec766a..f55b76c489 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + approvers: -- aledbf -- justinsb -- bprashanth -- thockin -- nicksardo + - ingress-nginx-admins + - ingress-nginx-maintainers + - ElvinEfendi + +reviewers: + - aledbf + - ElvinEfendi diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES new file mode 100644 index 0000000000..cc60c17607 --- /dev/null +++ b/OWNERS_ALIASES @@ -0,0 +1,12 @@ +# See the OWNERS docs: https://git.k8s.io/community/docs/devel/owners.md + +aliases: + sig-network-leads: + - caseydavenport + - dcbw + - thockin + ingress-nginx-admins: + - bowei + - aledbf + ingress-nginx-maintainers: + - aledbf diff --git a/README.md b/README.md index d1e2dfb84e..83a230baa9 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,31 @@ -The GCE ingress controller was moved to [github.com/kubernetes/ingress-gce](https://github.com/kubernetes/ingress-gce).
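A usage sketch for the e2e settings introduced in the Makefile above (`FOCUS`, `E2E_NODES`, `SLOW_E2E_THRESHOLD`); the focus pattern and node count below are illustrative values only, and a running cluster with a configured kubectl context is assumed:

```bash
# Build the controller binary and image for the host architecture
make build container

# Build the e2e test image, then run only the specs whose descriptions
# match FOCUS, on 4 parallel ginkgo nodes (illustrative values)
make e2e-test-image
make e2e-test FOCUS='\[Default Backend\]' E2E_NODES=4

# Register the QEMU binfmt handlers once, then cross-build, e.g. for arm64
make register-qemu
make ARCH=arm64 build container
```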
+## Help us improve the NGINX Ingress controller by [completing the survey](https://docs.google.com/forms/d/15ULTOvYDsV920V0GWrspew4yyjEmTAi740Wr34UgKwA/viewform) --- # NGINX Ingress Controller [![Build Status](https://travis-ci.org/kubernetes/ingress-nginx.svg?branch=master)](https://travis-ci.org/kubernetes/ingress-nginx) -[![Coverage Status](https://coveralls.io/repos/github/kubernetes/ingress-nginx/badge.svg?branch=master)](https://coveralls.io/github/kubernetes/ingress-nginx?branch=master) +[![Coverage Status](https://codecov.io/gh/kubernetes/ingress-nginx/branch/master/graph/badge.svg)](https://codecov.io/gh/kubernetes/ingress-nginx) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/ingress-nginx)](https://goreportcard.com/report/github.com/kubernetes/ingress-nginx) +[![GitHub license](https://img.shields.io/github/license/kubernetes/ingress-nginx.svg)](https://github.com/kubernetes/ingress-nginx/blob/master/LICENSE) +[![GitHub stars](https://img.shields.io/github/stars/kubernetes/ingress-nginx.svg)](https://github.com/kubernetes/ingress-nginx/stargazers) +[![GitHub stars](https://img.shields.io/badge/contributions-welcome-orange.svg)](https://github.com/kubernetes/ingress-nginx/blob/master/CONTRIBUTING.md) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fkubernetes%2Fingress-nginx.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fkubernetes%2Fingress-nginx?ref=badge_shield) + +# Get Involved + +- **Contributing**: Pull requests are welcome! + - Read [`CONTRIBUTING.md`](CONTRIBUTING.md) and check out [help-wanted](https://github.com/kubernetes/ingress-nginx/labels/help%20wanted) issues + - Submit GitHub issues for feature requests, bugs, or documentation problems +- **Support**: Join the [Kubernetes Slack](http://slack.kubernetes.io/) to ask questions and get support from the maintainers and other developers + - Questions/comments can also be posted as [GitHub issues](https://github.com/kubernetes/ingress-nginx/issues) +- **Discuss**: Tweet using the `#IngressNginx` hashtag ## Description -This repository contains the NGINX controller built around the [Kubernetes Ingress resource](http://kubernetes.io/docs/user-guide/ingress/) that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the NGINX configuration. +This repository contains the NGINX controller built around the [Kubernetes Ingress resource](http://kubernetes.io/docs/user-guide/ingress/) that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#understanding-configmaps-and-pods) to store the NGINX configuration. [Make Ingress-Nginx Work for you, and the Community](https://youtu.be/GDm-7BlmPPg) from KubeCon Europe 2018 is a great video to get you started!! -Learn more about using Ingress on [k8s.io](http://kubernetes.io/docs/user-guide/ingress/) +Learn more about using Ingress on [k8s.io](https://kubernetes.io/docs/concepts/services-networking/ingress/) ### What is an Ingress Controller? @@ -22,156 +35,33 @@ The Ingress resource embodies this idea, and an Ingress controller is meant to h An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/). Its job is to satisfy requests for Ingresses.
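To make that concrete, here is a minimal sketch of the kind of Ingress resource such a controller watches for and satisfies; the hostname and the `echo` backend Service are hypothetical placeholders, not values taken from this repository:

```bash
# Create a hypothetical Ingress routing example.local to an existing "echo" Service on port 80
kubectl apply -f - <<'EOF'
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    # claim this Ingress for the NGINX controller when several controllers run in the cluster
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: example.local
    http:
      paths:
      - path: /
        backend:
          serviceName: echo
          servicePort: 80
EOF
```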
-## Contents - -- [Conventions](#conventions) -- [Requirements](#requirements) -- [Deployment](deploy/README.md) -- [Command line arguments](docs/user-guide/cli-arguments.md) -- [Contribute](CONTRIBUTING.md) -- [TLS](docs/user-guide/tls.md) -- [Annotation ingress.class](#annotation-ingressclass) -- [Customizing NGINX](#customizing-nginx) - - [Custom NGINX configuration](docs/user-guide/configmap.md) - - [Annotations](docs/user-guide/annotations.md) -- [Source IP address](#source-ip-address) -- [Exposing TCP and UDP Services](docs/user-guide/exposing-tcp-udp-services.md) -- [Proxy Protocol](#proxy-protocol) -- [ModSecurity Web Application Firewall](docs/user-guide/modsecurity.md) -- [OpenTracing](docs/user-guide/opentracing.md) -- [VTS and Prometheus metrics](docs/examples/customization/custom-vts-metrics-prometheus/README.md) -- [Custom errors](docs/user-guide/custom-errors.md) -- [NGINX status page](docs/user-guide/nginx-status-page.md) -- [Running multiple ingress controllers](#running-multiple-ingress-controllers) -- [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller) -- [Retries in non-idempotent methods](#retries-in-non-idempotent-methods) -- [Log format](docs/user-guide/log-format.md) -- [Websockets](#websockets) -- [Optimizing TLS Time To First Byte (TTTFB)](#optimizing-tls-time-to-first-byte-tttfb) -- [Debug & Troubleshooting](docs/troubleshooting.md) -- [Limitations](#limitations) -- [Why endpoints and not services?](#why-endpoints-and-not-services) -- [External Articles](docs/user-guide/external-articles.md) - -## Conventions - -Anytime we reference a tls secret, we mean (x509, pem encoded, RSA 2048, etc). You can generate such a certificate with: -`openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"` -and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}` - -## Requirements - -The default backend is a service which handles all url paths and hosts the nginx controller doesn't understand (i.e., all the requests that are not mapped with an Ingress). -Basically a default backend exposes two URLs: - -- `/healthz` that returns 200 -- `/` that returns 404 - -The sub-directory [`/images/404-server`](https://github.com/kubernetes/ingress-nginx/tree/master/images/404-server) provides a service which satisfies the requirements for a default backend. The sub-directory [`/images/custom-error-pages`](https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages) provides an additional service for the purpose of customizing the error pages served via the default backend. - -## Annotation ingress.class - -If you have multiple Ingress controllers in a single cluster, you can pick one by specifying the `ingress.class` -annotation, eg creating an Ingress with an annotation like - -```yaml -metadata: - name: foo - annotations: - kubernetes.io/ingress.class: "gce" -``` - -will target the GCE controller, forcing the nginx controller to ignore it, while an annotation like - -```yaml -metadata: - name: foo - annotations: - kubernetes.io/ingress.class: "nginx" -``` - -will target the nginx controller, forcing the GCE controller to ignore it. - -__Note__: Deploying multiple ingress controller and not specifying the annotation will result in both controllers fighting to satisfy the Ingress. - -### Customizing NGINX - -There are three ways to customize NGINX: - -1. 
[ConfigMap](docs/user-guide/configmap.md): using a Configmap to set global configurations in NGINX. -2. [Annotations](docs/user-guide/annotations.md): use this if you want a specific configuration for a particular Ingress rule. -3. [Custom template](docs/user-guide/custom-template.md): when more specific settings are required, like [open_file_cache](http://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache), adjust [listen](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen) options as `rcvbuf` or when is not possible to change the configuration through the ConfigMap. - -## Source IP address - -By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct information of the IP/network address of trusted external load balancer. - -If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. - -Another option is to enable proxy protocol using `use-proxy-protocol: "true"`. - -In this mode NGINX does not use the content of the header to get the source IP address of the connection. - -## Proxy Protocol - -If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself. - -Amongst others [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol. - -### Running multiple ingress controllers - -If you're running multiple ingress controllers, or running on a cloud provider that natively handles ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses that you would like this controller to claim. This mechanism also provides users the ability to run _multiple_ NGINX ingress controllers (e.g. one which serves public traffic, one which serves "internal" traffic). When utilizing this functionality the option `--ingress-class` should be changed to a value unique for the cluster within the definition of the replication controller. Here is a partial example: - -``` -spec: - template: - spec: - containers: - - name: nginx-ingress-internal-controller - args: - - /nginx-ingress-controller - - '--default-backend-service=ingress/nginx-ingress-default-backend' - - '--election-id=ingress-controller-leader-internal' - - '--ingress-class=nginx-internal' - - '--configmap=ingress/nginx-ingress-internal-controller' -``` - -Not specifying the annotation will lead to multiple ingress controllers claiming the same ingress. Specifying a value which does not match the class of any existing ingress controllers will result in all ingress controllers ignoring the ingress. - -The use of multiple ingress controllers in a single cluster is supported in Kubernetes versions >= 1.3. - -### Websockets - -Support for websockets is provided by NGINX out of the box. No special configuration required. - -The only requirement to avoid the close of connections is the increase of the values of `proxy-read-timeout` and `proxy-send-timeout`. +## Documentation -The default value of this settings is `60 seconds`. 
+Check out the [Live Docs](https://kubernetes.github.io/ingress-nginx/) -A more adequate value to support websockets is a value higher than one hour (`3600`). +## Questions -### Optimizing TLS Time To First Byte (TTTFB) +For questions and support, please use the [#ingress-nginx](https://kubernetes.slack.com/messages/CANQGM8BA/) channel in the [Kubernetes Slack](http://slack.kubernetes.io/) or post to the [Kubernetes Forum](https://discuss.kubernetes.io). The issue list of this repo is **exclusively** for bug reports and feature requests. -NGINX provides the configuration option [ssl_buffer_size](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) to allow the optimization of the TLS record size. +## Issues -This improves the [Time To First Byte](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/) (TTTFB). -The default value in the Ingress controller is `4k` (NGINX default is `16k`). +Please make sure to read the [Issue Reporting Checklist](https://github.com/kubernetes/ingress-nginx/blob/master/CONTRIBUTING.md#issue-reporting-guidelines) before opening an issue. Issues not conforming to the guidelines may be closed immediately. -### Retries in non-idempotent methods +## Changelog -Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error. -The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap. +Detailed changes for each release are documented in [Changelog.md](Changelog.md). -### Disabling NGINX ingress controller +## Contribution -Setting the annotation `kubernetes.io/ingress.class` to any value other which does not match a valid ingress class will force the NGINX Ingress controller to ignore your Ingress. If you are only running a single NGINX ingress controller, this can be achieved by setting this to any value except "nginx" or an empty string. +Please make sure to read the [Contributing Guide](CONTRIBUTING.md) before making a pull request. -Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller. +Thank you to all the people who have already contributed to the NGINX Ingress Controller! -### Limitations +## Code of Conduct -- Ingress rules for TLS require the definition of the field `host` +This project adheres to the [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md). +By participating in this project you agree to abide by its terms. -### Why endpoints and not services +## License -The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) to allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT. +[Apache License 2.0](https://github.com/kubernetes/ingress-nginx/blob/master/LICENSE) diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS new file mode 100644 index 0000000000..5a7cc3ba6d --- /dev/null +++ b/SECURITY_CONTACTS @@ -0,0 +1,13 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues.
+# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +aledbf diff --git a/build/build-plugin.sh b/build/build-plugin.sh new file mode 100755 index 0000000000..6dd52d85ac --- /dev/null +++ b/build/build-plugin.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! [ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +declare -a mandatory +mandatory=( + PKG + ARCH + GIT_COMMIT + REPO_INFO + TAG +) + +missing=false +for var in "${mandatory[@]}"; do + if [[ -z "${!var:-}" ]]; then + echo "Environment variable $var must be set" + missing=true + fi +done + +if [ "$missing" = true ]; then + exit 1 +fi + +export CGO_ENABLED=0 + +release=cmd/plugin/release + +function build_for_arch(){ + os=$1 + arch=$2 + extension=$3 + + env GOOS=${os} GOARCH=${arch} go build \ + ${GOBUILD_FLAGS} \ + -ldflags "-s -w \ + -X ${PKG}/version.RELEASE=${TAG} \ + -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ + -X ${PKG}/version.REPO=${REPO_INFO}" \ + -o ${release}/kubectl-ingress_nginx${extension} ${PKG}/cmd/plugin + + tar -C ${release} -zcvf ${release}/kubectl-ingress_nginx-${os}-${arch}.tar.gz kubectl-ingress_nginx${extension} + rm ${release}/kubectl-ingress_nginx${extension} + hash=`sha256sum ${release}/kubectl-ingress_nginx-${os}-${arch}.tar.gz | awk '{ print $1 }'` + sed -i "s/%%%shasum_${os}_${arch}%%%/${hash}/g" ${release}/ingress-nginx.yaml +} + +rm -rf ${release} +mkdir ${release} + +cp cmd/plugin/ingress-nginx.yaml.tmpl ${release}/ingress-nginx.yaml + +sed -i "s/%%%tag%%%/${TAG}/g" ${release}/ingress-nginx.yaml + +build_for_arch darwin amd64 '' +build_for_arch linux amd64 '' +build_for_arch windows amd64 '.exe' diff --git a/build/build.sh b/build/build.sh new file mode 100755 index 0000000000..3f37ca447b --- /dev/null +++ b/build/build.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! 
[ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +declare -a mandatory +mandatory=( + PKG + ARCH + GIT_COMMIT + REPO_INFO + TAG +) + +missing=false +for var in "${mandatory[@]}"; do + if [[ -z "${!var:-}" ]]; then + echo "Environment variable $var must be set" + missing=true + fi +done + +if [ "$missing" = true ]; then + exit 1 +fi + +export CGO_ENABLED=0 + +go build \ + ${GOBUILD_FLAGS} \ + -ldflags "-s -w \ + -X ${PKG}/version.RELEASE=${TAG} \ + -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ + -X ${PKG}/version.REPO=${REPO_INFO}" \ + -o bin/${ARCH}/nginx-ingress-controller ${PKG}/cmd/nginx + +go build \ + ${GOBUILD_FLAGS} \ + -ldflags "-s -w \ + -X ${PKG}/version.RELEASE=${TAG} \ + -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ + -X ${PKG}/version.REPO=${REPO_INFO}" \ + -o bin/${ARCH}/dbg ${PKG}/cmd/dbg diff --git a/build/cover.sh b/build/cover.sh new file mode 100755 index 0000000000..3b25c4a231 --- /dev/null +++ b/build/cover.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! [ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +if [ -z "${PKG}" ]; then + echo "PKG must be set" + exit 1 +fi + +rm -rf coverage.txt +for d in `go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e' | grep -v images`; do + t=$(date +%s); + go test -coverprofile=cover.out -covermode=atomic $d || exit 1; + echo "Coverage test $d took $(($(date +%s)-$t)) seconds"; + if [ -f cover.out ]; then + cat cover.out >> coverage.txt; + rm cover.out; + fi; +done diff --git a/build/dev-env.sh b/build/dev-env.sh new file mode 100755 index 0000000000..59b883cdcf --- /dev/null +++ b/build/dev-env.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! 
[ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +SKIP_MINIKUBE_START=${SKIP_MINIKUBE_START:-} +NAMESPACE="${NAMESPACE:-ingress-nginx}" +echo "NAMESPACE is set to ${NAMESPACE}" + +kubectl config use-context minikube + +export TAG=dev +export ARCH=amd64 +export REGISTRY=${REGISTRY:-ingress-controller} + +DEV_IMAGE=${REGISTRY}/nginx-ingress-controller:${TAG} + +if [ -z "${SKIP_MINIKUBE_START}" ]; then + test $(minikube status | grep Running | wc -l) -ge 2 && $(minikube status | grep -q 'Correctly Configured') || minikube start \ + --extra-config=kubelet.sync-frequency=1s \ + --extra-config=apiserver.authorization-mode=RBAC + + eval $(minikube docker-env --shell bash) +fi + +echo "[dev-env] building container" +make build container + +docker save "${DEV_IMAGE}" | (eval $(minikube docker-env --shell bash) && docker load) || true + +for tool in kubectl kustomize; do + echo "[dev-env] installing $tool" + $tool version || brew install $tool +done + +if ! kubectl get namespace $NAMESPACE; then + kubectl create namespace $NAMESPACE +fi + +ROOT=./deploy/minikube + +pushd $ROOT +kustomize edit set namespace $NAMESPACE +kustomize edit set image quay.io/kubernetes-ingress-controller/nginx-ingress-controller=${DEV_IMAGE} +popd + +echo "[dev-env] deploying NGINX Ingress controller in namespace $NAMESPACE" +kustomize build $ROOT | kubectl apply -f - diff --git a/build/mkdocs/Dockerfile b/build/mkdocs/Dockerfile new file mode 100644 index 0000000000..ebd3efc57e --- /dev/null +++ b/build/mkdocs/Dockerfile @@ -0,0 +1,40 @@ +# Copyright 2018 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM alpine:3.9 + +RUN apk update && apk add --no-cache \ + bash \ + git \ + git-fast-import \ + openssh \ + python3 \ + python3-dev \ + curl \ + && python3 -m ensurepip \ + && rm -r /usr/lib/python*/ensurepip \ + && pip3 install --upgrade pip setuptools \ + && rm -r /root/.cache \ + && rm -rf /var/cache/apk/* + +RUN curl -sSL https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/requirements-docs.txt -o requirements.txt \ + && pip install -U -r requirements.txt + +WORKDIR /docs + +EXPOSE 3000 + +COPY entrypoint.sh / + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/build/mkdocs/entrypoint.sh b/build/mkdocs/entrypoint.sh new file mode 100755 index 0000000000..dc87db8bc6 --- /dev/null +++ b/build/mkdocs/entrypoint.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit +set -o pipefail + +CMD=$1 + +if [ "$CMD" == "build" ]; +then + mkdocs build + exit 0; +fi + +mkdocs serve --dev-addr=0.0.0.0:3000 --livereload diff --git a/build/run-in-docker.sh b/build/run-in-docker.sh new file mode 100755 index 0000000000..e8732b6117 --- /dev/null +++ b/build/run-in-docker.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! [ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +E2E_IMAGE=quay.io/kubernetes-ingress-controller/e2e:v06042019-0c7a34696 + +DOCKER_OPTS=${DOCKER_OPTS:-""} + +FLAGS=$@ + +PKG=k8s.io/ingress-nginx +ARCH=$(go env GOARCH) + +MINIKUBE_PATH=${HOME}/.minikube +MINIKUBE_VOLUME="-v ${MINIKUBE_PATH}:${MINIKUBE_PATH}" +if [ ! -d ${MINIKUBE_PATH} ]; then + echo "Minikube directory not found! Volume will be excluded from docker build." + MINIKUBE_VOLUME="" +fi + +docker run \ + --tty \ + --rm \ + ${DOCKER_OPTS} \ + -v ${HOME}/.kube:/${HOME}/.kube \ + -v ${PWD}:/go/src/${PKG} \ + -v ${PWD}/.gocache:${HOME}/.cache/go-build \ + -v ${PWD}/bin/${ARCH}:/go/bin/linux_${ARCH} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + ${MINIKUBE_VOLUME} \ + -w /go/src/${PKG} \ + ${E2E_IMAGE} ${FLAGS} diff --git a/build/static-check.sh b/build/static-check.sh new file mode 100755 index 0000000000..49f408f908 --- /dev/null +++ b/build/static-check.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if ! [ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +if [ -z "${PKG}" ]; then + echo "PKG must be set" + exit 1 +fi + +hack/verify-all.sh + +luacheck --codes -q rootfs/etc/nginx/lua/ diff --git a/build/test-lua.sh b/build/test-lua.sh new file mode 100755 index 0000000000..b843f66fdd --- /dev/null +++ b/build/test-lua.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +if ! 
[ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +resty \ + -I ./rootfs/etc/nginx/lua \ + -I /usr/local/lib/lua \ + -I /usr/lib/lua-platform-path/lua/5.1 \ + --shdict "configuration_data 5M" \ + --shdict "certificate_data 16M" \ + --shdict "balancer_ewma 1M" \ + --shdict "balancer_ewma_last_touched_at 1M" \ + ./rootfs/etc/nginx/lua/test/run.lua ${BUSTED_ARGS} ./rootfs/etc/nginx/lua/test/ diff --git a/build/test.sh b/build/test.sh new file mode 100755 index 0000000000..758b4e5939 --- /dev/null +++ b/build/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +if ! [ -z $DEBUG ]; then + set -x +fi + +set -o errexit +set -o nounset +set -o pipefail + +if [ -z "${PKG}" ]; then + echo "PKG must be set" + exit 1 +fi + +go test -v -race -tags "cgo" \ + $(go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e' | grep -v images | grep -v "docs/examples") diff --git a/cmd/dbg/main.go b/cmd/dbg/main.go new file mode 100644 index 0000000000..7b9657ab36 --- /dev/null +++ b/cmd/dbg/main.go @@ -0,0 +1,241 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/ingress-nginx/internal/nginx"
+)
+
+const (
+	backendsPath = "/configuration/backends"
+	generalPath  = "/configuration/general"
+	certsPath    = "/configuration/certs"
+)
+
+func main() {
+	rootCmd := &cobra.Command{
+		Use:   "dbg",
+		Short: "dbg is a tool for quickly inspecting the state of the nginx instance",
+	}
+
+	backendsCmd := &cobra.Command{
+		Use:   "backends",
+		Short: "Inspect the dynamically-loaded backends information",
+	}
+	rootCmd.AddCommand(backendsCmd)
+
+	backendsAllCmd := &cobra.Command{
+		Use:   "all",
+		Short: "Output all of the dynamic backend information as a JSON array",
+		Run: func(cmd *cobra.Command, args []string) {
+			backendsAll()
+		},
+	}
+	backendsCmd.AddCommand(backendsAllCmd)
+
+	backendsListCmd := &cobra.Command{
+		Use:   "list",
+		Short: "Output a newline-separated list of the backend names",
+		Run: func(cmd *cobra.Command, args []string) {
+			backendsList()
+		},
+	}
+	backendsCmd.AddCommand(backendsListCmd)
+
+	backendsGetCmd := &cobra.Command{
+		Use:   "get [backend name]",
+		Short: "Output the backend information only for the backend that has this name",
+		Args:  cobra.ExactArgs(1),
+		Run: func(cmd *cobra.Command, args []string) {
+			backendsGet(args[0])
+		},
+	}
+	backendsCmd.AddCommand(backendsGetCmd)
+
+	certCmd := &cobra.Command{
+		Use:   "certs",
+		Short: "Inspect dynamic SSL certificates",
+	}
+
+	certGetCmd := &cobra.Command{
+		Use:   "get [hostname]",
+		Short: "Get the dynamically-loaded certificate information for the given hostname",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			certGet(args[0])
+			return nil
+		},
+	}
+	certCmd.AddCommand(certGetCmd)
+
+	rootCmd.AddCommand(certCmd)
+
+	generalCmd := &cobra.Command{
+		Use:   "general",
+		Short: "Output the general dynamic lua state",
+		Run: func(cmd *cobra.Command, args []string) {
+			general()
+		},
+	}
+	rootCmd.AddCommand(generalCmd)
+
+	confCmd := &cobra.Command{
+		Use:   "conf",
+		Short: "Dump the contents of /etc/nginx/nginx.conf",
+		Run: func(cmd *cobra.Command, args []string) {
+			readNginxConf()
+		},
+	}
+	rootCmd.AddCommand(confCmd)
+
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+func backendsAll() {
+	statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath)
+	if requestErr != nil {
+		fmt.Println(requestErr)
+		return
+	}
+	if statusCode != 200 {
+		fmt.Printf("Nginx returned code %v\n", statusCode)
+		return
+	}
+
+	var prettyBuffer bytes.Buffer
+	indentErr := json.Indent(&prettyBuffer, body, "", " ")
+	if indentErr != nil {
+		fmt.Println(indentErr)
+		return
+	}
+
+	fmt.Println(string(prettyBuffer.Bytes()))
+}
+
+func backendsList() {
+	statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath)
+	if requestErr != nil {
+		fmt.Println(requestErr)
+		return
+	}
+	if statusCode != 200 {
+		fmt.Printf("Nginx returned code %v\n", statusCode)
+		return
+	}
+
+	var f interface{}
+	unmarshalErr := json.Unmarshal(body, &f)
+	if unmarshalErr != nil {
+		fmt.Println(unmarshalErr)
+		return
+	}
+	backends := f.([]interface{})
+
+	for _, backendi := range backends {
+		backend := backendi.(map[string]interface{})
+		fmt.Println(backend["name"].(string))
+	}
+}
+
+func backendsGet(name string) {
+	statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath)
+	if requestErr != nil {
+		fmt.Println(requestErr)
+		return
+	}
+	if statusCode != 200 {
+		fmt.Printf("Nginx returned code %v\n", statusCode)
return + } + + var f interface{} + unmarshalErr := json.Unmarshal(body, &f) + if unmarshalErr != nil { + fmt.Println(unmarshalErr) + return + } + backends := f.([]interface{}) + + for _, backendi := range backends { + backend := backendi.(map[string]interface{}) + if backend["name"].(string) == name { + printed, _ := json.MarshalIndent(backend, "", " ") + fmt.Println(string(printed)) + return + } + } + fmt.Println("A backend of this name was not found.") +} + +func certGet(host string) { + statusCode, body, requestErr := nginx.NewGetStatusRequest(certsPath + "?hostname=" + host) + if requestErr != nil { + fmt.Println(requestErr) + return + } + + if statusCode == 200 { + fmt.Print(string(body)) + return + } else if statusCode != 404 { + fmt.Printf("Nginx returned code %v\n", statusCode) + fmt.Println(string(body)) + return + } + + fmt.Printf("No cert found for host %v\n", host) +} + +func general() { + statusCode, body, requestErr := nginx.NewGetStatusRequest(generalPath) + if requestErr != nil { + fmt.Println(requestErr) + return + } + if statusCode != 200 { + fmt.Printf("Nginx returned code %v\n", statusCode) + return + } + + var prettyBuffer bytes.Buffer + indentErr := json.Indent(&prettyBuffer, body, "", " ") + if indentErr != nil { + fmt.Println(indentErr) + return + } + + fmt.Println(string(prettyBuffer.Bytes())) +} + +func readNginxConf() { + conf, err := nginx.ReadNginxConf() + if err != nil { + fmt.Println(err) + return + } + + fmt.Println(conf) +} diff --git a/cmd/nginx/flag_test.go b/cmd/nginx/flag_test.go index ca662e4555..6e826b9328 100644 --- a/cmd/nginx/flag_test.go +++ b/cmd/nginx/flag_test.go @@ -31,15 +31,15 @@ func resetForTesting(usage func()) { flag.Usage = usage } -func TestMandatoryFlag(t *testing.T) { +func TestNoMandatoryFlag(t *testing.T) { _, _, err := parseFlags() - if err == nil { - t.Fatalf("expected and error about default backend service") + if err != nil { + t.Fatalf("Expected no error but got: %s", err) } } func TestDefaults(t *testing.T) { - resetForTesting(func() { t.Fatal("bad parse") }) + resetForTesting(func() { t.Fatal("Parsing failed") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() @@ -47,18 +47,31 @@ func TestDefaults(t *testing.T) { showVersion, conf, err := parseFlags() if err != nil { - t.Fatalf("unexpected error parsing default flags: %v", err) + t.Fatalf("Unexpected error parsing default flags: %v", err) } if showVersion { - t.Fatal("expected false but true was returned for flag show-version") + t.Fatal("Expected flag \"show-version\" to be false") } if conf == nil { - t.Fatal("expected a configuration but nil returned") + t.Fatal("Expected a controller Configuration") } } func TestSetupSSLProxy(t *testing.T) { // TODO } + +func TestFlagConflict(t *testing.T) { + resetForTesting(func() { t.Fatal("Parsing failed") }) + + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "--publish-service", "namespace/test", "--http-port", "0", "--https-port", "0", "--publish-status-address", "1.1.1.1"} + + _, _, err := parseFlags() + if err == nil { + t.Fatalf("Expected an error parsing flags but none returned") + } +} diff --git a/cmd/nginx/flags.go b/cmd/nginx/flags.go index d80afb714a..0615d94ef7 100644 --- a/cmd/nginx/flags.go +++ b/cmd/nginx/flags.go @@ -22,136 +22,179 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/spf13/pflag" apiv1 "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/ingress/annotations/class" 
"k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/controller" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" ing_net "k8s.io/ingress-nginx/internal/net" + "k8s.io/ingress-nginx/internal/nginx" ) func parseFlags() (bool, *controller.Configuration, error) { var ( flags = pflag.NewFlagSet("", pflag.ExitOnError) - apiserverHost = flags.String("apiserver-host", "", "The address of the Kubernetes Apiserver "+ - "to connect to in the format of protocol://address:port, e.g., "+ - "http://localhost:8080. If not specified, the assumption is that the binary runs inside a "+ - "Kubernetes cluster and local discovery is attempted.") - kubeConfigFile = flags.String("kubeconfig", "", "Path to kubeconfig file with authorization and master location information.") + apiserverHost = flags.String("apiserver-host", "", + `Address of the Kubernetes API server. +Takes the form "protocol://address:port". If not specified, it is assumed the +program runs inside a Kubernetes cluster and local discovery is attempted.`) + + kubeConfigFile = flags.String("kubeconfig", "", + `Path to a kubeconfig file containing authorization and API server information.`) defaultSvc = flags.String("default-backend-service", "", - `Service used to serve a 404 page for the default backend. Takes the form - namespace/name. The controller uses the first node port of this Service for - the default backend.`) + `Service used to serve HTTP requests not matching any known server name (catch-all). +Takes the form "namespace/name". The controller configures NGINX to forward +requests to the first port of this Service.`) ingressClass = flags.String("ingress-class", "", - `Name of the ingress class to route through this controller.`) + `Name of the ingress class this controller satisfies. +The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class". +All ingress classes are satisfied if this parameter is left empty.`) configMap = flags.String("configmap", "", - `Name of the ConfigMap that contains the custom configuration to use`) + `Name of the ConfigMap containing custom global configurations for the controller.`) publishSvc = flags.String("publish-service", "", - `Service fronting the ingress controllers. Takes the form namespace/name. - The controller will set the endpoint records on the ingress objects to reflect those on the service.`) + `Service fronting the Ingress controller. +Takes the form "namespace/name". When used together with update-status, the +controller mirrors the address of this service's endpoints to the load-balancer +status of all Ingress objects it satisfies.`) tcpConfigMapName = flags.String("tcp-services-configmap", "", - `Name of the ConfigMap that contains the definition of the TCP services to expose. - The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port. - The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend`) - + `Name of the ConfigMap containing the definition of the TCP services to expose. +The key in the map indicates the external port to be used. The value is a +reference to a Service in the form "namespace/name:port", where "port" can +either be a port number or name. 
TCP ports 80 and 443 are reserved by the +controller for servicing HTTP traffic.`) udpConfigMapName = flags.String("udp-services-configmap", "", - `Name of the ConfigMap that contains the definition of the UDP services to expose. - The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port.`) + `Name of the ConfigMap containing the definition of the UDP services to expose. +The key in the map indicates the external port to be used. The value is a +reference to a Service in the form "namespace/name:port", where "port" can +either be a port name or number.`) - resyncPeriod = flags.Duration("sync-period", 600*time.Second, - `Relist and confirm cloud resources this often. Default is 10 minutes`) + resyncPeriod = flags.Duration("sync-period", 0, + `Period at which the controller forces the repopulation of its local object stores. Disabled by default.`) watchNamespace = flags.String("watch-namespace", apiv1.NamespaceAll, - `Namespace to watch for Ingress. Default is to watch all namespaces`) - - profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`) + `Namespace the controller watches for updates to Kubernetes objects. +This includes Ingresses, Services and all configuration resources. All +namespaces are watched if this parameter is left empty.`) - defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret - that contains a SSL certificate to be used as default for a HTTPS catch-all server. - Takes the form /.`) + profiling = flags.Bool("profiling", true, + `Enable profiling via web interface host:port/debug/pprof/`) - defHealthzURL = flags.String("health-check-path", "/healthz", `Defines - the URL to be used as health check inside in the default server in NGINX.`) + defSSLCertificate = flags.String("default-ssl-certificate", "", + `Secret containing a SSL certificate to be used by the default HTTPS server (catch-all). +Takes the form "namespace/name".`) - updateStatus = flags.Bool("update-status", true, `Indicates if the - ingress controller should update the Ingress status IP/hostname. Default is true`) + defHealthzURL = flags.String("health-check-path", "/healthz", + `URL path of the health check endpoint. +Configured inside the NGINX status server. All requests received on the port +defined by the healthz-port parameter are forwarded internally to this path.`) - electionID = flags.String("election-id", "ingress-controller-leader", `Election id to use for status update.`) + defHealthCheckTimeout = flags.Int("health-check-timeout", 10, `Time limit, in seconds, for a probe to health-check-path to succeed.`) - forceIsolation = flags.Bool("force-namespace-isolation", false, - `Force namespace isolation. This flag is required to avoid the reference of secrets or - configmaps located in a different namespace than the specified in the flag --watch-namespace.`) + updateStatus = flags.Bool("update-status", true, + `Update the load-balancer status of Ingress objects this controller satisfies. +Requires setting the publish-service parameter to a valid Service reference.`) - disableNodeList = flags.Bool("disable-node-list", false, - `Disable querying nodes. If --force-namespace-isolation is true, this should also be set. 
(DEPRECATED)`) + electionID = flags.String("election-id", "ingress-controller-leader", + `Election id to use for Ingress status updates.`) - updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, `Indicates if the - ingress controller should update the Ingress status IP/hostname when the controller - is being stopped. Default is true`) + _ = flags.Bool("force-namespace-isolation", false, + `Force namespace isolation. +Prevents Ingress objects from referencing Secrets and ConfigMaps located in a +different namespace than their own. May be used together with watch-namespace.`) - sortBackends = flags.Bool("sort-backends", false, - `Defines if backends and it's endpoints should be sorted`) + updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, + `Update the load-balancer status of Ingress objects when the controller shuts down. +Requires the update-status parameter.`) useNodeInternalIP = flags.Bool("report-node-internal-ip-address", false, - `Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address`) + `Set the load-balancer status of Ingress objects to internal Node addresses instead of external. +Requires the update-status parameter.`) showVersion = flags.Bool("version", false, - `Shows release information about the NGINX Ingress controller`) - - enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false, `Enable SSL passthrough feature. Default is disabled`) - - httpPort = flags.Int("http-port", 80, `Indicates the port to use for HTTP traffic`) - httpsPort = flags.Int("https-port", 443, `Indicates the port to use for HTTPS traffic`) - statusPort = flags.Int("status-port", 18080, `Indicates the TCP port to use for exposing the nginx status page`) - sslProxyPort = flags.Int("ssl-passtrough-proxy-port", 442, `Default port to use internally for SSL when SSL Passthgough is enabled`) - defServerPort = flags.Int("default-server-port", 8181, `Default port to use for exposing the default server (catch all)`) - healthzPort = flags.Int("healthz-port", 10254, "port for healthz endpoint.") - - annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io", `Prefix of the ingress annotations.`) - - enableSSLChainCompletion = flags.Bool("enable-ssl-chain-completion", true, - `Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates. - If the certificate contain issues chain issues is not possible to enable OCSP. - Default is true.`) + `Show release information about the NGINX Ingress controller and exit.`) + + enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false, + `Enable SSL Passthrough.`) + + annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io", + `Prefix of the Ingress annotations specific to the NGINX controller.`) + + enableSSLChainCompletion = flags.Bool("enable-ssl-chain-completion", false, + `Autocomplete SSL certificate chains with missing intermediate CA certificates. +A valid certificate chain is required to enable OCSP stapling. Certificates +uploaded to Kubernetes must have the "Authority Information Access" X.509 v3 +extension for this to succeed.`) + + syncRateLimit = flags.Float32("sync-rate-limit", 0.3, + `Define the sync frequency upper limit`) + + publishStatusAddress = flags.String("publish-status-address", "", + `Customized address to set as the load-balancer status of Ingress objects this controller satisfies. 
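+For example (illustrative): "203.0.113.10".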
+Requires the update-status parameter.`) + + dynamicCertificatesEnabled = flags.Bool("enable-dynamic-certificates", true, + `Dynamically update SSL certificates instead of reloading NGINX. +Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not enabled`) + + enableMetrics = flags.Bool("enable-metrics", true, + `Enables the collection of NGINX metrics`) + metricsPerHost = flags.Bool("metrics-per-host", true, + `Export metrics per-host`) + + httpPort = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`) + httpsPort = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`) + _ = flags.Int("status-port", 18080, `Port to use for exposing NGINX status pages.`) + sslProxyPort = flags.Int("ssl-passthrough-proxy-port", 442, `Port to use internally for SSL Passthrough.`) + defServerPort = flags.Int("default-server-port", 8181, `Port to use for exposing the default server (catch-all).`) + healthzPort = flags.Int("healthz-port", 10254, "Port to use for the healthz endpoint.") + + disableCatchAll = flags.Bool("disable-catch-all", false, + `Disable support for catch-all Ingresses`) + + validationWebhook = flags.String("validating-webhook", "", + `The address to start an admission controller on to validate incoming ingresses. +Takes the form ":port". If not provided, no admission controller is started.`) + validationWebhookCert = flags.String("validating-webhook-certificate", "", + `The path of the validating webhook certificate PEM.`) + validationWebhookKey = flags.String("validating-webhook-key", "", + `The path of the validating webhook key PEM.`) ) + flags.MarkDeprecated("status-port", `The status port is a unix socket now.`) + flags.MarkDeprecated("force-namespace-isolation", `This flag doesn't do anything.`) + flag.Set("logtostderr", "true") flags.AddGoFlagSet(flag.CommandLine) flags.Parse(os.Args) - flag.Set("logtostderr", "true") // Workaround for this issue: // https://github.com/kubernetes/kubernetes/issues/17162 flag.CommandLine.Parse([]string{}) + pflag.VisitAll(func(flag *pflag.Flag) { + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) + if *showVersion { return true, nil, nil } - if *defaultSvc == "" { - return false, nil, fmt.Errorf("Please specify --default-backend-service") - } - if *ingressClass != "" { - glog.Infof("Watching for ingress class: %s", *ingressClass) + klog.Infof("Watching for Ingress class: %s", *ingressClass) if *ingressClass != class.DefaultClass { - glog.Warningf("only Ingress with class \"%v\" will be processed by this ingress controller", *ingressClass) + klog.Warningf("Only Ingresses with class %q will be processed by this Ingress controller", *ingressClass) } class.IngressClass = *ingressClass @@ -168,56 +211,66 @@ func parseFlags() (bool, *controller.Configuration, error) { return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --https-port", *httpsPort) } - if !ing_net.IsPortAvailable(*statusPort) { - return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --status-port", *statusPort) - } - if !ing_net.IsPortAvailable(*defServerPort) { return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --default-server-port", *defServerPort) } if *enableSSLPassthrough && !ing_net.IsPortAvailable(*sslProxyPort) { - return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --ssl-passtrough-proxy-port", *sslProxyPort) + return false, nil, fmt.Errorf("Port %v is already in use. 
Please check the flag --ssl-passthrough-proxy-port", *sslProxyPort) } - // TODO: remove disableNodeList flag - if *disableNodeList { - glog.Warningf("%s is DEPRECATED and will be removed in a future version.", disableNodeList) + if !*enableSSLChainCompletion { + klog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)") } - if !*enableSSLChainCompletion { - glog.Warningf("Check of SSL certificate chain is disabled (--enable-ssl-chain-completion=false)") + if *enableSSLChainCompletion && *dynamicCertificatesEnabled { + return false, nil, fmt.Errorf(`SSL certificate chain completion cannot be enabled when dynamic certificates functionality is enabled. Please check the flags --enable-ssl-chain-completion`) + } + + if *publishSvc != "" && *publishStatusAddress != "" { + return false, nil, fmt.Errorf("Flags --publish-service and --publish-status-address are mutually exclusive") + } + + nginx.HealthPath = *defHealthzURL + + if *defHealthCheckTimeout > 0 { + nginx.HealthCheckTimeout = time.Duration(*defHealthCheckTimeout) * time.Second } config := &controller.Configuration{ - APIServerHost: *apiserverHost, - KubeConfigFile: *kubeConfigFile, - UpdateStatus: *updateStatus, - ElectionID: *electionID, - EnableProfiling: *profiling, - EnableSSLPassthrough: *enableSSLPassthrough, - EnableSSLChainCompletion: *enableSSLChainCompletion, - ResyncPeriod: *resyncPeriod, - DefaultService: *defaultSvc, - Namespace: *watchNamespace, - ConfigMapName: *configMap, - TCPConfigMapName: *tcpConfigMapName, - UDPConfigMapName: *udpConfigMapName, - DefaultSSLCertificate: *defSSLCertificate, - DefaultHealthzURL: *defHealthzURL, - PublishService: *publishSvc, - ForceNamespaceIsolation: *forceIsolation, - UpdateStatusOnShutdown: *updateStatusOnShutdown, - SortBackends: *sortBackends, - UseNodeInternalIP: *useNodeInternalIP, + APIServerHost: *apiserverHost, + KubeConfigFile: *kubeConfigFile, + UpdateStatus: *updateStatus, + ElectionID: *electionID, + EnableProfiling: *profiling, + EnableMetrics: *enableMetrics, + MetricsPerHost: *metricsPerHost, + EnableSSLPassthrough: *enableSSLPassthrough, + EnableSSLChainCompletion: *enableSSLChainCompletion, + ResyncPeriod: *resyncPeriod, + DefaultService: *defaultSvc, + Namespace: *watchNamespace, + ConfigMapName: *configMap, + TCPConfigMapName: *tcpConfigMapName, + UDPConfigMapName: *udpConfigMapName, + DefaultSSLCertificate: *defSSLCertificate, + PublishService: *publishSvc, + PublishStatusAddress: *publishStatusAddress, + UpdateStatusOnShutdown: *updateStatusOnShutdown, + UseNodeInternalIP: *useNodeInternalIP, + SyncRateLimit: *syncRateLimit, + DynamicCertificatesEnabled: *dynamicCertificatesEnabled, ListenPorts: &ngx_config.ListenPorts{ Default: *defServerPort, Health: *healthzPort, HTTP: *httpPort, HTTPS: *httpsPort, SSLProxy: *sslProxyPort, - Status: *statusPort, }, + DisableCatchAll: *disableCatchAll, + ValidationWebhook: *validationWebhook, + ValidationWebhookCertPath: *validationWebhookCert, + ValidationWebhookKeyPath: *validationWebhookKey, } return false, config, nil diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index cbc52573cd..231e07fc42 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -19,34 +19,48 @@ package main import ( "encoding/json" "fmt" - "net" + "math/rand" "net/http" "net/http/pprof" "os" "os/signal" - "strings" "syscall" "time" - proxyproto "github.com/armon/go-proxyproto" - "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + discovery "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress/controller" + "k8s.io/ingress-nginx/internal/ingress/metric" "k8s.io/ingress-nginx/internal/k8s" "k8s.io/ingress-nginx/internal/net/ssl" "k8s.io/ingress-nginx/version" ) +const ( + // High enough QPS to fit all expected use cases. QPS=0 is not set here, because + // client code is overriding it. + defaultQPS = 1e6 + // High enough Burst to fit all expected use cases. Burst=0 is not set here, because + // client code is overriding it. + defaultBurst = 1e6 +) + func main() { + klog.InitFlags(nil) + + rand.Seed(time.Now().UnixNano()) + fmt.Println(version.String()) showVersion, conf, err := parseFlags() @@ -55,12 +69,14 @@ func main() { } if err != nil { - glog.Fatal(err) + klog.Fatal(err) } + nginxVersion() + fs, err := file.NewLocalFS() if err != nil { - glog.Fatal(err) + klog.Fatal(err) } kubeClient, err := createApiserverClient(conf.APIServerHost, conf.KubeConfigFile) @@ -68,78 +84,72 @@ func main() { handleFatalInitError(err) } - ns, name, err := k8s.ParseNameNS(conf.DefaultService) - if err != nil { - glog.Fatal(err) - } - - _, err = kubeClient.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - if err != nil { - if strings.Contains(err.Error(), "cannot get services in the namespace") { - glog.Fatalf("✖ It seems the cluster it is running with Authorization enabled (like RBAC) and there is no permissions for the ingress controller. 
Please check the configuration") - } - glog.Fatalf("no service with name %v found: %v", conf.DefaultService, err) - } - glog.Infof("validated %v as the default backend", conf.DefaultService) - - if conf.PublishService != "" { - ns, name, err := k8s.ParseNameNS(conf.PublishService) + if len(conf.DefaultService) > 0 { + defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } - svc, err := kubeClient.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + _, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{}) if err != nil { - glog.Fatalf("unexpected error getting information about service %v: %v", conf.PublishService, err) - } - - if len(svc.Status.LoadBalancer.Ingress) == 0 { - if len(svc.Spec.ExternalIPs) > 0 { - glog.Infof("service %v validated as assigned with externalIP", conf.PublishService) - } else { - // We could poll here, but we instead just exit and rely on k8s to restart us - glog.Fatalf("service %s does not (yet) have ingress points", conf.PublishService) + if errors.IsUnauthorized(err) || errors.IsForbidden(err) { + klog.Fatal("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.") } - } else { - glog.Infof("service %v validated as source of Ingress status", conf.PublishService) + klog.Fatalf("No service with name %v found: %v", conf.DefaultService, err) } + klog.Infof("Validated %v as the default backend.", conf.DefaultService) } if conf.Namespace != "" { _, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{}) if err != nil { - glog.Fatalf("no namespace with name %v found: %v", conf.Namespace, err) + klog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err) } } - if conf.ResyncPeriod.Seconds() < 10 { - glog.Fatalf("resync period (%vs) is too low", conf.ResyncPeriod.Seconds()) - } + conf.FakeCertificate = ssl.GetFakeSSLCert(fs) + klog.Infof("Created fake certificate with PemFileName: %v", conf.FakeCertificate.PemFileName) - // create the default SSL certificate (dummy) - defCert, defKey := ssl.GetFakeSSLCert() - c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}) - if err != nil { - glog.Fatalf("Error generating self signed certificate: %v", err) + k8s.IsNetworkingIngressAvailable = k8s.NetworkingIngressAvailable(kubeClient) + if !k8s.IsNetworkingIngressAvailable { + klog.Warningf("Using deprecated \"k8s.io/api/extensions/v1beta1\" package because Kubernetes version is < v1.14.0") } - conf.FakeCertificatePath = c.PemFileName - conf.FakeCertificateSHA = c.PemSHA - conf.Client = kubeClient - ngx := controller.NewNGINXController(conf, fs) + reg := prometheus.NewRegistry() + + reg.MustRegister(prometheus.NewGoCollector()) + reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: func() (int, error) { return os.Getpid(), nil }, + ReportErrors: true, + })) - if conf.EnableSSLPassthrough { - setupSSLProxy(conf.ListenPorts.HTTPS, conf.ListenPorts.SSLProxy, ngx) + mc := metric.NewDummyCollector() + if conf.EnableMetrics { + mc, err = metric.NewCollector(conf.MetricsPerHost, reg) + if err != nil { + klog.Fatalf("Error creating prometheus collector: %v", err) + } } + mc.Start() + ngx := controller.NewNGINXController(conf, mc, fs) go handleSigterm(ngx, func(code int) { os.Exit(code) }) mux := http.NewServeMux() - go registerHandlers(conf.EnableProfiling, conf.ListenPorts.Health, ngx, mux) + + 
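+	// The mux assembled below is served by startHTTPServer on the healthz
+	// port and exposes: /healthz (health checks), /metrics (the Prometheus
+	// registry created above), /build (version information) and, only when
+	// --profiling is enabled, the /debug/pprof/* handlers.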
if conf.EnableProfiling { + registerProfiler(mux) + } + + registerHealthz(ngx, mux) + registerMetrics(reg, mux) + registerHandlers(mux) + + go startHTTPServer(conf.ListenPorts.Health, mux) ngx.Start() } @@ -150,72 +160,31 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGTERM) <-signalChan - glog.Infof("Received SIGTERM, shutting down") + klog.Info("Received SIGTERM, shutting down") exitCode := 0 if err := ngx.Stop(); err != nil { - glog.Infof("Error during shutdown %v", err) + klog.Infof("Error during shutdown: %v", err) exitCode = 1 } - glog.Infof("Handled quit, awaiting pod deletion") + klog.Info("Handled quit, awaiting Pod deletion") time.Sleep(10 * time.Second) - glog.Infof("Exiting with %v", exitCode) + klog.Infof("Exiting with %v", exitCode) exit(exitCode) } -func setupSSLProxy(sslPort, proxyPort int, n *controller.NGINXController) { - glog.Info("starting TLS proxy for SSL passthrough") - n.Proxy = &controller.TCPProxy{ - Default: &controller.TCPServer{ - Hostname: "localhost", - IP: "127.0.0.1", - Port: proxyPort, - ProxyProtocol: true, - }, - } - - listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort)) - if err != nil { - glog.Fatalf("%v", err) - } - - proxyList := &proxyproto.Listener{Listener: listener} - - // start goroutine that accepts tcp connections in port 443 - go func() { - for { - var conn net.Conn - var err error - - if n.IsProxyProtocolEnabled { - // we need to wrap the listener in order to decode - // proxy protocol before handling the connection - conn, err = proxyList.Accept() - } else { - conn, err = listener.Accept() - } - - if err != nil { - glog.Warningf("unexpected error accepting tcp connection: %v", err) - continue - } - - glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) - go n.Proxy.Handle(conn) - } - }() -} - -// createApiserverClient creates new Kubernetes Apiserver client. When kubeconfig or apiserverHost param is empty -// the function assumes that it is running inside a Kubernetes cluster and attempts to -// discover the Apiserver. Otherwise, it connects to the Apiserver specified. -// -// apiserverHost param is in the format of protocol://address:port/pathPrefix, e.g.http://localhost:8001. -// kubeConfig location of kubeconfig file -func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes.Clientset, error) { - cfg, err := buildConfigFromFlags(apiserverHost, kubeConfig) +// createApiserverClient creates a new Kubernetes REST client. apiserverHost is +// the URL of the API server in the format protocol://address:port/pathPrefix, +// kubeConfig is the location of a kubeconfig file. If defined, the kubeconfig +// file is loaded first, the URL of the API server read from the file is then +// optionally overridden by the value of apiserverHost. +// If neither apiserverHost nor kubeConfig is passed in, we assume the +// controller runs inside Kubernetes and fallback to the in-cluster config. If +// the in-cluster config is missing or fails, we fallback to the default config. 
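+//
+// A minimal usage sketch (the kubeconfig path is illustrative only):
+//
+//	client, err := createApiserverClient("", "/home/user/.kube/config")
+//	if err != nil {
+//		handleFatalInitError(err)
+//	}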
+func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Clientset, error) { + cfg, err := clientcmd.BuildConfigFromFlags(apiserverHost, kubeConfig) if err != nil { return nil, err } @@ -224,108 +193,114 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes cfg.Burst = defaultBurst cfg.ContentType = "application/vnd.kubernetes.protobuf" - glog.Infof("Creating API client for %s", cfg.Host) + klog.Infof("Creating API client for %s", cfg.Host) client, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, err } - v, err := client.Discovery().ServerVersion() - if err != nil { - return nil, err - } + var v *discovery.Info - glog.Infof("Running in Kubernetes Cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", - v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) + // The client may fail to connect to the API server in the first request. + // https://github.com/kubernetes/ingress-nginx/issues/1968 + defaultRetry := wait.Backoff{ + Steps: 10, + Duration: 1 * time.Second, + Factor: 1.5, + Jitter: 0.1, + } - return client, nil -} + var lastErr error + retries := 0 + klog.V(2).Info("Trying to discover Kubernetes version") + err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) { + v, err = client.Discovery().ServerVersion() -const ( - // High enough QPS to fit all expected use cases. QPS=0 is not set here, because - // client code is overriding it. - defaultQPS = 1e6 - // High enough Burst to fit all expected use cases. Burst=0 is not set here, because - // client code is overriding it. - defaultBurst = 1e6 + if err == nil { + return true, nil + } - fakeCertificate = "default-fake-certificate" -) + lastErr = err + klog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", retries, err) + retries++ + return false, nil + }) -// buildConfigFromFlags builds REST config based on master URL and kubeconfig path. -// If both of them are empty then in cluster config is used. -func buildConfigFromFlags(masterURL, kubeconfigPath string) (*rest.Config, error) { - if kubeconfigPath == "" && masterURL == "" { - kubeconfig, err := rest.InClusterConfig() - if err != nil { - return nil, err - } + // err is returned in case of timeout in the exponential backoff (ErrWaitTimeout) + if err != nil { + return nil, lastErr + } - return kubeconfig, nil + // this should not happen, warn the user + if retries > 0 { + klog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries) } - return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &clientcmd.ConfigOverrides{ - ClusterInfo: clientcmdapi.Cluster{ - Server: masterURL, - }, - }).ClientConfig() + klog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", + v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) + + return client, nil } -/** - * Handles fatal init error that prevents server from doing any work. Prints verbose error - * message and quits the server. - */ +// Handler for fatal init errors. Prints a verbose error message and exits. func handleFatalInitError(err error) { - glog.Fatalf("Error while initializing connection to Kubernetes apiserver. "+ - "This most likely means that the cluster is misconfigured (e.g., it has "+ - "invalid apiserver certificates or service accounts configuration). 
Reason: %s\n"+ + klog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+ + "This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+ + "or Service Accounts configuration). Reason: %s\n"+ "Refer to the troubleshooting guide for more information: "+ - "https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md", err) + "https://kubernetes.github.io/ingress-nginx/troubleshooting/", + err) } -func registerHandlers(enableProfiling bool, port int, ic *controller.NGINXController, mux *http.ServeMux) { +func registerHandlers(mux *http.ServeMux) { + mux.HandleFunc("/build", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(version.String()) + w.Write(b) + }) +} + +func registerHealthz(ic *controller.NGINXController, mux *http.ServeMux) { // expose health check endpoint (/healthz) healthz.InstallHandler(mux, healthz.PingHealthz, ic, ) +} - mux.Handle("/metrics", promhttp.Handler()) - - mux.HandleFunc("/build", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - b, _ := json.Marshal(version.String()) - w.Write(b) - }) +func registerMetrics(reg *prometheus.Registry, mux *http.ServeMux) { + mux.Handle( + "/metrics", + promhttp.InstrumentMetricHandler( + reg, + promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), + ), + ) - mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) { - err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM) - if err != nil { - glog.Errorf("unexpected error: %v", err) - } - }) +} - if enableProfiling { - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/heap", pprof.Index) - mux.HandleFunc("/debug/pprof/mutex", pprof.Index) - mux.HandleFunc("/debug/pprof/goroutine", pprof.Index) - mux.HandleFunc("/debug/pprof/threadcreate", pprof.Index) - mux.HandleFunc("/debug/pprof/block", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - } +func registerProfiler(mux *http.ServeMux) { + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/heap", pprof.Index) + mux.HandleFunc("/debug/pprof/mutex", pprof.Index) + mux.HandleFunc("/debug/pprof/goroutine", pprof.Index) + mux.HandleFunc("/debug/pprof/threadcreate", pprof.Index) + mux.HandleFunc("/debug/pprof/block", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) +} +func startHTTPServer(port int, mux *http.ServeMux) { server := &http.Server{ - Addr: fmt.Sprintf(":%v", port), - Handler: mux, - ReadTimeout: 10 * time.Second, - WriteTimeout: 30 * time.Second, + Addr: fmt.Sprintf(":%v", port), + Handler: mux, + ReadTimeout: 10 * time.Second, + ReadHeaderTimeout: 10 * time.Second, + WriteTimeout: 300 * time.Second, + IdleTimeout: 120 * time.Second, } - glog.Fatal(server.ListenAndServe()) + klog.Fatal(server.ListenAndServe()) } diff --git a/cmd/nginx/main_test.go b/cmd/nginx/main_test.go index 752e65e68f..c4236ebfcb 100644 --- a/cmd/nginx/main_test.go +++ b/cmd/nginx/main_test.go @@ -23,41 +23,47 @@ import ( "testing" "time" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + 
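+	// fake.NewSimpleClientset (used below) builds an in-memory clientset,
+	// so these tests run without a live cluster.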
"k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress/controller" ) func TestCreateApiserverClient(t *testing.T) { - home := os.Getenv("HOME") - kubeConfigFile := fmt.Sprintf("%v/.kube/config", home) - - cli, err := createApiserverClient("", kubeConfigFile) - if err != nil { - t.Fatalf("unexpected error creating api server client: %v", err) - } - if cli == nil { - t.Fatalf("expected a kubernetes client but none returned") - } - - _, err = createApiserverClient("", "") + _, err := createApiserverClient("", "") if err == nil { - t.Fatalf("expected an error creating api server client without an api server URL or kubeconfig file") + t.Fatal("Expected an error creating REST client without an API server URL or kubeconfig file.") } } func TestHandleSigterm(t *testing.T) { - home := os.Getenv("HOME") - kubeConfigFile := fmt.Sprintf("%v/.kube/config", home) + clientSet := fake.NewSimpleClientset() - cli, err := createApiserverClient("", kubeConfigFile) + ns := "test" + + cm := createConfigMap(clientSet, ns, t) + defer deleteConfigMap(cm, ns, clientSet, t) + + name := "test" + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + } + + _, err := clientSet.CoreV1().Pods(ns).Create(&pod) if err != nil { - t.Fatalf("unexpected error creating api server client: %v", err) + t.Fatalf("error creating pod %v: %v", pod, err) } resetForTesting(func() { t.Fatal("bad parse") }) - os.Setenv("POD_NAME", "test") - os.Setenv("POD_NAMESPACE", "test") + os.Setenv("POD_NAME", name) + os.Setenv("POD_NAMESPACE", ns) defer os.Setenv("POD_NAME", "") defer os.Setenv("POD_NAMESPACE", "") @@ -67,20 +73,20 @@ func TestHandleSigterm(t *testing.T) { _, conf, err := parseFlags() if err != nil { - t.Errorf("unexpected error creating NGINX controller: %v", err) + t.Errorf("Unexpected error creating NGINX controller: %v", err) } - conf.Client = cli + conf.Client = clientSet fs, err := file.NewFakeFS() if err != nil { - t.Fatalf("unexpected error: %v", err) + t.Fatalf("Unexpected error: %v", err) } - ngx := controller.NewNGINXController(conf, fs) + ngx := controller.NewNGINXController(conf, nil, fs) go handleSigterm(ngx, func(code int) { if code != 1 { - t.Errorf("expected exit code 1 but %v received", code) + t.Errorf("Expected exit code 1 but %d received", code) } return @@ -88,12 +94,45 @@ func TestHandleSigterm(t *testing.T) { time.Sleep(1 * time.Second) - t.Logf("sending SIGTERM to process PID %v", syscall.Getpid()) + t.Logf("Sending SIGTERM to PID %d", syscall.Getpid()) err = syscall.Kill(syscall.Getpid(), syscall.SIGTERM) if err != nil { - t.Errorf("unexpected error sending SIGTERM signal") + t.Error("Unexpected error sending SIGTERM signal.") + } + + err = clientSet.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{}) + if err != nil { + t.Fatalf("error deleting pod %v: %v", pod, err) + } +} + +func createConfigMap(clientSet kubernetes.Interface, ns string, t *testing.T) string { + t.Helper() + t.Log("Creating temporal config map") + + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), + }, + } + + cm, err := clientSet.CoreV1().ConfigMaps(ns).Create(configMap) + if err != nil { + t.Errorf("error creating the configuration map: %v", err) } + t.Logf("Temporal configmap %v created", cm) + + return cm.Name } -func TestRegisterHandlers(t *testing.T) { +func deleteConfigMap(cm, ns string, clientSet kubernetes.Interface, t *testing.T) { + t.Helper() + t.Logf("Deleting temporal 
configmap %v", cm) + + err := clientSet.CoreV1().ConfigMaps(ns).Delete(cm, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("error deleting the configmap: %v", err) + } + t.Logf("Temporal configmap %v deleted", cm) } diff --git a/cmd/nginx/nginx.go b/cmd/nginx/nginx.go new file mode 100644 index 0000000000..c314cbe77d --- /dev/null +++ b/cmd/nginx/nginx.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + "os/exec" + + "k8s.io/klog" +) + +func nginxVersion() { + flag := "-v" + + if klog.V(2) { + flag = "-V" + } + + cmd := exec.Command("nginx", flag) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() +} diff --git a/cmd/plugin/commands/backends/backends.go b/cmd/plugin/commands/backends/backends.go new file mode 100644 index 0000000000..39973e3ab5 --- /dev/null +++ b/cmd/plugin/commands/backends/backends.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package backends
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+
+	"k8s.io/ingress-nginx/cmd/plugin/kubectl"
+	"k8s.io/ingress-nginx/cmd/plugin/request"
+	"k8s.io/ingress-nginx/cmd/plugin/util"
+)
+
+// CreateCommand creates and returns this cobra subcommand
+func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command {
+	var pod, deployment *string
+	cmd := &cobra.Command{
+		Use:   "backends",
+		Short: "Inspect the dynamic backend information of an ingress-nginx instance",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			backend, err := cmd.Flags().GetString("backend")
+			if err != nil {
+				return err
+			}
+			onlyList, err := cmd.Flags().GetBool("list")
+			if err != nil {
+				return err
+			}
+			if onlyList && backend != "" {
+				return fmt.Errorf("--list and --backend cannot both be specified")
+			}
+
+			util.PrintError(backends(flags, *pod, *deployment, backend, onlyList))
+			return nil
+		},
+	}
+
+	pod = util.AddPodFlag(cmd)
+	deployment = util.AddDeploymentFlag(cmd)
+	cmd.Flags().String("backend", "", "Output only the information for the given backend")
+	cmd.Flags().Bool("list", false, "Output a newline-separated list of backend names")
+
+	return cmd
+}
+
+func backends(flags *genericclioptions.ConfigFlags, podName string, deployment string, backend string, onlyList bool) error {
+	var command []string
+	if onlyList {
+		command = []string{"/dbg", "backends", "list"}
+	} else if backend != "" {
+		command = []string{"/dbg", "backends", "get", backend}
+	} else {
+		command = []string{"/dbg", "backends", "all"}
+	}
+
+	pod, err := request.ChoosePod(flags, podName, deployment)
+	if err != nil {
+		return err
+	}
+
+	out, err := kubectl.PodExecString(flags, &pod, command)
+	if err != nil {
+		return err
+	}
+
+	fmt.Print(out)
+	return nil
+}
diff --git a/cmd/plugin/commands/certs/certs.go b/cmd/plugin/commands/certs/certs.go
new file mode 100644
index 0000000000..b16dd8f11c
--- /dev/null
+++ b/cmd/plugin/commands/certs/certs.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package certs + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + var pod, deployment *string + cmd := &cobra.Command{ + Use: "certs", + Short: "Output the certificate data stored in an ingress-nginx pod", + RunE: func(cmd *cobra.Command, args []string) error { + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + util.PrintError(certs(flags, *pod, *deployment, host)) + return nil + }, + } + + cmd.Flags().String("host", "", "Get the cert for this hostname") + cobra.MarkFlagRequired(cmd.Flags(), "host") + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + + return cmd +} + +func certs(flags *genericclioptions.ConfigFlags, podName string, deployment string, host string) error { + command := []string{"/dbg", "certs", "get", host} + + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + out, err := kubectl.PodExecString(flags, &pod, command) + if err != nil { + return err + } + + fmt.Print(out) + return nil +} diff --git a/cmd/plugin/commands/conf/conf.go b/cmd/plugin/commands/conf/conf.go new file mode 100644 index 0000000000..51c3084930 --- /dev/null +++ b/cmd/plugin/commands/conf/conf.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conf + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" + "k8s.io/ingress-nginx/internal/nginx" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + var pod, deployment *string + cmd := &cobra.Command{ + Use: "conf", + Short: "Inspect the generated nginx.conf", + RunE: func(cmd *cobra.Command, args []string) error { + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + util.PrintError(conf(flags, host, *pod, *deployment)) + return nil + }, + } + cmd.Flags().String("host", "", "Print just the server block with this hostname") + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + + return cmd +} + +func conf(flags *genericclioptions.ConfigFlags, host string, podName string, deployment string) error { + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + nginxConf, err := kubectl.PodExecString(flags, &pod, []string{"/dbg", "conf"}) + if err != nil { + return err + } + + if host != "" { + block, err := nginx.GetServerBlock(nginxConf, host) + if err != nil { + return err + } + + fmt.Println(strings.TrimRight(strings.Trim(block, " \n"), " \n\t")) + } else { + fmt.Print(nginxConf) + } + + return nil +} diff --git a/cmd/plugin/commands/exec/exec.go b/cmd/plugin/commands/exec/exec.go new file mode 100644 index 0000000000..05f056d2e1 --- /dev/null +++ b/cmd/plugin/commands/exec/exec.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package exec + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + opts := execFlags{} + var pod, deployment *string + + cmd := &cobra.Command{ + Use: "exec", + Short: "Execute a command inside an ingress-nginx pod", + RunE: func(cmd *cobra.Command, args []string) error { + util.PrintError(exec(flags, *pod, *deployment, args, opts)) + return nil + }, + } + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + cmd.Flags().BoolVarP(&opts.TTY, "tty", "t", false, "Stdin is a TTY") + cmd.Flags().BoolVarP(&opts.Stdin, "stdin", "i", false, "Pass stdin to the container") + + return cmd +} + +type execFlags struct { + TTY bool + Stdin bool +} + +func exec(flags *genericclioptions.ConfigFlags, podName string, deployment string, cmd []string, opts execFlags) error { + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + args := []string{"exec"} + if opts.TTY { + args = append(args, "-t") + } + if opts.Stdin { + args = append(args, "-i") + } + + args = append(args, []string{"-n", pod.Namespace, pod.Name, "--"}...) + args = append(args, cmd...) + return kubectl.Exec(flags, args) +} diff --git a/cmd/plugin/commands/general/general.go b/cmd/plugin/commands/general/general.go new file mode 100644 index 0000000000..1826810011 --- /dev/null +++ b/cmd/plugin/commands/general/general.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package general + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + var pod, deployment *string + cmd := &cobra.Command{ + Use: "general", + Short: "Inspect the other dynamic ingress-nginx information", + RunE: func(cmd *cobra.Command, args []string) error { + util.PrintError(general(flags, *pod, *deployment)) + return nil + }, + } + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + + return cmd +} + +func general(flags *genericclioptions.ConfigFlags, podName string, deployment string) error { + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + out, err := kubectl.PodExecString(flags, &pod, []string{"/dbg", "general"}) + if err != nil { + return err + } + + fmt.Print(out) + return nil +} diff --git a/cmd/plugin/commands/info/info.go b/cmd/plugin/commands/info/info.go new file mode 100644 index 0000000000..246046c3ae --- /dev/null +++ b/cmd/plugin/commands/info/info.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package info + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "info", + Short: "Show information about the ingress-nginx service", + RunE: func(cmd *cobra.Command, args []string) error { + service, err := cmd.Flags().GetString("service") + if err != nil { + return err + } + + util.PrintError(info(flags, service)) + return nil + }, + } + + cmd.Flags().String("service", util.DefaultIngressServiceName, "The name of the ingress-nginx service") + return cmd +} + +func info(flags *genericclioptions.ConfigFlags, serviceName string) error { + service, err := request.GetServiceByName(flags, serviceName, nil) + if err != nil { + return err + } + + fmt.Printf("Service cluster IP address: %v\n", service.Spec.ClusterIP) + fmt.Printf("LoadBalancer IP|CNAME: %v\n", service.Spec.LoadBalancerIP) + return nil +} diff --git a/cmd/plugin/commands/ingresses/ingresses.go b/cmd/plugin/commands/ingresses/ingresses.go new file mode 100644 index 0000000000..38da629302 --- /dev/null +++ b/cmd/plugin/commands/ingresses/ingresses.go @@ -0,0 +1,217 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingresses + +import ( + "fmt" + "os" + "text/tabwriter" + + "github.com/spf13/cobra" + networking "k8s.io/api/networking/v1beta1" + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "ingresses", + Aliases: []string{"ingress", "ing"}, + Short: "Provide a short summary of all of the ingress definitions", + RunE: func(cmd *cobra.Command, args []string) error { + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + allNamespaces, err := cmd.Flags().GetBool("all-namespaces") + if err != nil { + return err + } + + util.PrintError(ingresses(flags, host, allNamespaces)) + return nil + }, + } + cmd.Flags().String("host", "", "Show just the ingress definitions for this hostname") + cmd.Flags().Bool("all-namespaces", false, "Find ingress definitions from all namespaces") + + return cmd +} + +func ingresses(flags *genericclioptions.ConfigFlags, host string, allNamespaces bool) error { + var namespace string + if allNamespaces { + namespace = "" + } else { + namespace = util.GetNamespace(flags) + } + + ingresses, err := request.GetIngressDefinitions(flags, namespace) + if err != nil { + return err + } + + rows := getIngressRows(&ingresses) + + if host != "" { + rowsWithHost := make([]ingressRow, 0) + for _, row := range rows { + if row.Host == host { + rowsWithHost = append(rowsWithHost, row) + } + } + rows = rowsWithHost + } + + printer := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', 0) + defer printer.Flush() + + if allNamespaces { + fmt.Fprintln(printer, "NAMESPACE\tINGRESS NAME\tHOST+PATH\tADDRESSES\tTLS\tSERVICE\tSERVICE PORT\tENDPOINTS") + } else { + fmt.Fprintln(printer, "INGRESS NAME\tHOST+PATH\tADDRESSES\tTLS\tSERVICE\tSERVICE PORT\tENDPOINTS") + } + + for _, row := range rows { + var tlsMsg string + if row.TLS { + tlsMsg = "YES" + } else { + tlsMsg = "NO" + } + + numEndpoints, err := request.GetNumEndpoints(flags, row.Namespace, row.ServiceName) + if err != nil { + return err + } + if numEndpoints == nil { + row.NumEndpoints = "N/A" + } else { + row.NumEndpoints = fmt.Sprint(*numEndpoints) + } + + if allNamespaces { + fmt.Fprintf(printer, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", row.Namespace, row.IngressName, row.Host+row.Path, row.Address, tlsMsg, row.ServiceName, row.ServicePort, row.NumEndpoints) + } else { + fmt.Fprintf(printer, "%v\t%v\t%v\t%v\t%v\t%v\t%v\n", row.IngressName, row.Host+row.Path, row.Address, tlsMsg, row.ServiceName, row.ServicePort, row.NumEndpoints) + } + } + + return nil +} + +type ingressRow struct { + Namespace string + IngressName string + Host string + Path string + TLS bool + ServiceName string + ServicePort string + Address string + NumEndpoints string +} + +func getIngressRows(ingresses *[]networking.Ingress) []ingressRow { + rows := make([]ingressRow, 0) + + for _, ing := range *ingresses { + + address := "" + for _, lbIng := range ing.Status.LoadBalancer.Ingress { + if 
len(lbIng.IP) > 0 { + address = address + lbIng.IP + "," + } + if len(lbIng.Hostname) > 0 { + address = address + lbIng.Hostname + "," + } + } + if len(address) > 0 { + address = address[:len(address)-1] + } + + tlsHosts := make(map[string]struct{}) + for _, tls := range ing.Spec.TLS { + for _, host := range tls.Hosts { + tlsHosts[host] = struct{}{} + } + } + + defaultBackendService := "" + defaultBackendPort := "" + if ing.Spec.Backend != nil { + defaultBackendService = ing.Spec.Backend.ServiceName + defaultBackendPort = ing.Spec.Backend.ServicePort.String() + } + + // Handle catch-all ingress + if len(ing.Spec.Rules) == 0 && len(defaultBackendService) > 0 { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: "*", + ServiceName: defaultBackendService, + ServicePort: defaultBackendPort, + Address: address, + } + + rows = append(rows, row) + continue + } + + for _, rule := range ing.Spec.Rules { + _, hasTLS := tlsHosts[rule.Host] + + //Handle ingress with no paths + if rule.HTTP == nil { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: rule.Host, + Path: "", + TLS: hasTLS, + ServiceName: defaultBackendService, + ServicePort: defaultBackendPort, + Address: address, + } + rows = append(rows, row) + continue + } + + for _, path := range rule.HTTP.Paths { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: rule.Host, + Path: path.Path, + TLS: hasTLS, + ServiceName: path.Backend.ServiceName, + ServicePort: path.Backend.ServicePort.String(), + Address: address, + } + + rows = append(rows, row) + } + } + } + + return rows +} diff --git a/cmd/plugin/commands/lint/main.go b/cmd/plugin/commands/lint/main.go new file mode 100644 index 0000000000..d120e93114 --- /dev/null +++ b/cmd/plugin/commands/lint/main.go @@ -0,0 +1,232 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lint + +import ( + "fmt" + + "github.com/spf13/cobra" + + appsv1 "k8s.io/api/apps/v1" + networking "k8s.io/api/networking/v1beta1" + kmeta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/lints" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" + "k8s.io/ingress-nginx/version" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + var opts *lintOptions + cmd := &cobra.Command{ + Use: "lint", + Short: "Inspect kubernetes resources for possible issues", + RunE: func(cmd *cobra.Command, args []string) error { + err := opts.Validate() + if err != nil { + return err + } + + fmt.Println("Checking ingresses...") + err = ingresses(*opts) + if err != nil { + util.PrintError(err) + } + fmt.Println("Checking deployments...") + err = deployments(*opts) + if err != nil { + util.PrintError(err) + } + + return nil + }, + } + + opts = addCommonOptions(flags, cmd) + + cmd.AddCommand(createSubcommand(flags, []string{"ingresses", "ingress", "ing"}, "Check ingresses for possible issues", ingresses)) + cmd.AddCommand(createSubcommand(flags, []string{"deployments", "deployment", "dep"}, "Check deployments for possible issues", deployments)) + + return cmd +} + +func createSubcommand(flags *genericclioptions.ConfigFlags, names []string, short string, f func(opts lintOptions) error) *cobra.Command { + var opts *lintOptions + cmd := &cobra.Command{ + Use: names[0], + Aliases: names[1:], + Short: short, + RunE: func(cmd *cobra.Command, args []string) error { + err := opts.Validate() + if err != nil { + return err + } + util.PrintError(f(*opts)) + return nil + }, + } + + opts = addCommonOptions(flags, cmd) + + return cmd +} + +func addCommonOptions(flags *genericclioptions.ConfigFlags, cmd *cobra.Command) *lintOptions { + out := lintOptions{ + flags: flags, + } + cmd.Flags().BoolVar(&out.allNamespaces, "all-namespaces", false, "Check resources in all namespaces") + cmd.Flags().BoolVar(&out.showAll, "show-all", false, "Show all resources, not just the ones with problems") + cmd.Flags().BoolVarP(&out.verbose, "verbose", "v", false, "Show extra information about the lints") + cmd.Flags().StringVarP(&out.versionFrom, "from-version", "f", "0.0.0", "Use lints added for versions starting with this one") + cmd.Flags().StringVarP(&out.versionTo, "to-version", "t", version.RELEASE, "Use lints added for versions up to and including this one") + + return &out +} + +type lintOptions struct { + flags *genericclioptions.ConfigFlags + allNamespaces bool + showAll bool + verbose bool + versionFrom string + versionTo string +} + +func (opts *lintOptions) Validate() error { + _, _, _, err := util.ParseVersionString(opts.versionFrom) + if err != nil { + return err + } + + _, _, _, err = util.ParseVersionString(opts.versionTo) + if err != nil { + return err + } + + return nil +} + +type lint interface { + Check(obj kmeta.Object) bool + Message() string + Link() string + Version() string +} + +func checkObjectArray(lints []lint, objects []kmeta.Object, opts lintOptions) { + usedLints := make([]lint, 0) + for _, lint := range lints { + lintVersion := lint.Version() + if lint.Version() == "" { + lintVersion = "0.0.0" + } + if util.InVersionRangeInclusive(opts.versionFrom, lintVersion, opts.versionTo) { + usedLints = append(usedLints, lint) + } + } + + for _, obj := range objects { + objName := obj.GetName() + if 
opts.allNamespaces { + objName = obj.GetNamespace() + "/" + obj.GetName() + } + + failedLints := make([]lint, 0) + for _, lint := range usedLints { + if lint.Check(obj) { + failedLints = append(failedLints, lint) + } + } + + if len(failedLints) != 0 { + fmt.Printf("✗ %v\n", objName) + for _, lint := range failedLints { + fmt.Printf(" - %v\n", lint.Message()) + if opts.verbose && lint.Version() != "" { + fmt.Printf(" Lint added for version %v\n", lint.Version()) + } + if opts.verbose && lint.Link() != "" { + fmt.Printf(" %v\n", lint.Link()) + } + } + fmt.Println("") + continue + } + + if opts.showAll { + fmt.Printf("✓ %v\n", objName) + } + } +} + +func ingresses(opts lintOptions) error { + var ings []networking.Ingress + var err error + if opts.allNamespaces { + ings, err = request.GetIngressDefinitions(opts.flags, "") + } else { + ings, err = request.GetIngressDefinitions(opts.flags, util.GetNamespace(opts.flags)) + } + if err != nil { + return err + } + + var iLints []lints.IngressLint = lints.GetIngressLints() + genericLints := make([]lint, len(iLints)) + for i := range iLints { + genericLints[i] = iLints[i] + } + + objects := make([]kmeta.Object, 0) + for i := range ings { + objects = append(objects, &ings[i]) + } + + checkObjectArray(genericLints, objects, opts) + return nil +} + +func deployments(opts lintOptions) error { + var deps []appsv1.Deployment + var err error + if opts.allNamespaces { + deps, err = request.GetDeployments(opts.flags, "") + } else { + deps, err = request.GetDeployments(opts.flags, util.GetNamespace(opts.flags)) + } + if err != nil { + return err + } + + var iLints []lints.DeploymentLint = lints.GetDeploymentLints() + genericLints := make([]lint, len(iLints)) + for i := range iLints { + genericLints[i] = iLints[i] + } + + objects := make([]kmeta.Object, 0) + for i := range deps { + objects = append(objects, &deps[i]) + } + + checkObjectArray(genericLints, objects, opts) + return nil +} diff --git a/cmd/plugin/commands/logs/logs.go b/cmd/plugin/commands/logs/logs.go new file mode 100644 index 0000000000..81e18e7d8e --- /dev/null +++ b/cmd/plugin/commands/logs/logs.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logs + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + o := logsFlags{} + var pod, deployment *string + + cmd := &cobra.Command{ + Use: "logs", + Short: "Get the kubernetes logs for an ingress-nginx pod", + RunE: func(cmd *cobra.Command, args []string) error { + util.PrintError(logs(flags, *pod, *deployment, o)) + return nil + }, + } + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + + cmd.Flags().BoolVarP(&o.Follow, "follow", "f", o.Follow, "Specify if the logs should be streamed.") + cmd.Flags().BoolVar(&o.Timestamps, "timestamps", o.Timestamps, "Include timestamps on each line in the log output") + cmd.Flags().Int64Var(&o.LimitBytes, "limit-bytes", o.LimitBytes, "Maximum bytes of logs to return. Defaults to no limit.") + cmd.Flags().BoolVarP(&o.Previous, "previous", "p", o.Previous, "If true, print the logs for the previous instance of the container in a pod if it exists.") + cmd.Flags().Int64Var(&o.Tail, "tail", o.Tail, "Lines of recent log file to display. Defaults to -1 with no selector, showing all log lines otherwise 10, if a selector is provided.") + cmd.Flags().StringVar(&o.SinceTime, "since-time", o.SinceTime, "Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.") + cmd.Flags().StringVar(&o.SinceSeconds, "since", o.SinceSeconds, "Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.") + cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on.") + + return cmd +} + +type logsFlags struct { + SinceTime string + SinceSeconds string + Follow bool + Previous bool + Timestamps bool + LimitBytes int64 + Tail int64 + Selector string +} + +func (o *logsFlags) toStrings() []string { + r := []string{} + if o.SinceTime != "" { + r = append(r, "--since-time", o.SinceTime) + } + if o.SinceSeconds != "" { + r = append(r, "--since", o.SinceSeconds) + } + if o.Follow { + r = append(r, "--follow") + } + if o.Previous { + r = append(r, "--previous") + } + if o.Timestamps { + r = append(r, "--timestamps") + } + if o.LimitBytes != 0 { + r = append(r, "--limit-bytes", fmt.Sprintf("%v", o.LimitBytes)) + } + if o.Tail != 0 { + r = append(r, "--tail", fmt.Sprintf("%v", o.Tail)) + } + if o.Selector != "" { + r = append(r, "--selector", o.Selector) + } + + return r +} + +func logs(flags *genericclioptions.ConfigFlags, podName string, deployment string, opts logsFlags) error { + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + cmd := []string{"logs", "-n", pod.Namespace, pod.Name} + cmd = append(cmd, opts.toStrings()...) + return kubectl.Exec(flags, cmd) +} diff --git a/cmd/plugin/commands/ssh/ssh.go b/cmd/plugin/commands/ssh/ssh.go new file mode 100644 index 0000000000..77fcd9940c --- /dev/null +++ b/cmd/plugin/commands/ssh/ssh.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ssh + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + "k8s.io/ingress-nginx/cmd/plugin/kubectl" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +// CreateCommand creates and returns this cobra subcommand +func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { + var pod, deployment *string + cmd := &cobra.Command{ + Use: "ssh", + Short: "ssh into a running ingress-nginx pod", + RunE: func(cmd *cobra.Command, args []string) error { + util.PrintError(ssh(flags, *pod, *deployment)) + return nil + }, + } + pod = util.AddPodFlag(cmd) + deployment = util.AddDeploymentFlag(cmd) + + return cmd +} + +func ssh(flags *genericclioptions.ConfigFlags, podName string, deployment string) error { + pod, err := request.ChoosePod(flags, podName, deployment) + if err != nil { + return err + } + + return kubectl.Exec(flags, []string{"exec", "-it", "-n", pod.Namespace, pod.Name, "--", "/bin/bash"}) +} diff --git a/cmd/plugin/ingress-nginx.yaml.tmpl b/cmd/plugin/ingress-nginx.yaml.tmpl new file mode 100644 index 0000000000..1d8f6e0362 --- /dev/null +++ b/cmd/plugin/ingress-nginx.yaml.tmpl @@ -0,0 +1,41 @@ +apiVersion: krew.googlecontainertools.github.com/v1alpha2 +kind: Plugin +metadata: + name: ingress-nginx +spec: + shortDescription: Interact with ingress-nginx + description: | + The official kubectl plugin for ingress-nginx. + version: %%%tag%%% + homepage: https://kubernetes.github.io/ingress-nginx/kubectl-plugin/ + platforms: + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-darwin-amd64.tar.gz + sha256: %%%shasum_darwin_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx" + selector: + matchLabels: + os: darwin + arch: amd64 + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-linux-amd64.tar.gz + sha256: %%%shasum_linux_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx" + selector: + matchLabels: + os: linux + arch: amd64 + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-windows-amd64.tar.gz + sha256: %%%shasum_windows_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx.exe" + selector: + matchLabels: + os: windows + arch: amd64 diff --git a/cmd/plugin/kubectl/kubectl.go b/cmd/plugin/kubectl/kubectl.go new file mode 100644 index 0000000000..c11ba5b772 --- /dev/null +++ b/cmd/plugin/kubectl/kubectl.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strings"
+	"syscall"
+
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+)
+
+// PodExecString takes a pod and a command, uses kubectl exec to run the command in the pod
+// and returns stdout as a string
+func PodExecString(flags *genericclioptions.ConfigFlags, pod *apiv1.Pod, args []string) (string, error) {
+	args = append([]string{"exec", "-n", pod.Namespace, pod.Name}, args...)
+	return ExecToString(flags, args)
+}
+
+// ExecToString runs a kubectl subcommand and returns stdout as a string
+func ExecToString(flags *genericclioptions.ConfigFlags, args []string) (string, error) {
+	kArgs := getKubectlConfigFlags(flags)
+	kArgs = append(kArgs, args...)
+
+	buf := bytes.NewBuffer(make([]byte, 0))
+	err := execToWriter(append([]string{"kubectl"}, kArgs...), buf)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// Exec replaces the current process with a kubectl invocation
+func Exec(flags *genericclioptions.ConfigFlags, args []string) error {
+	kArgs := getKubectlConfigFlags(flags)
+	kArgs = append(kArgs, args...)
+	return execCommand(append([]string{"kubectl"}, kArgs...))
+}
+
+// Replaces the currently running process with the given command
+func execCommand(args []string) error {
+	path, err := exec.LookPath(args[0])
+	if err != nil {
+		return err
+	}
+	args[0] = path
+
+	env := os.Environ()
+	return syscall.Exec(path, args, env)
+}
+
+// Runs a command, streaming its stdout into the given writer
+func execToWriter(args []string, writer io.Writer) error {
+	cmd := exec.Command(args[0], args[1:]...)
+
+	op, err := cmd.StdoutPipe()
+	if err != nil {
+		return err
+	}
+
+	go io.Copy(writer, op)
+	err = cmd.Run()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getKubectlConfigFlags serializes the parsed flag struct back into a series of command line args
+// that can then be passed to kubectl. The mirror image of
+// https://github.com/kubernetes/cli-runtime/blob/master/pkg/genericclioptions/config_flags.go#L251
+func getKubectlConfigFlags(flags *genericclioptions.ConfigFlags) []string {
+	out := []string{}
+	o := &out
+
+	appendStringFlag(o, flags.KubeConfig, "kubeconfig")
+	appendStringFlag(o, flags.CacheDir, "cache-dir")
+	appendStringFlag(o, flags.CertFile, "client-certificate")
+	appendStringFlag(o, flags.KeyFile, "client-key")
+	appendStringFlag(o, flags.BearerToken, "token")
+	appendStringFlag(o, flags.Impersonate, "as")
+	appendStringArrayFlag(o, flags.ImpersonateGroup, "as-group")
+	appendStringFlag(o, flags.Username, "username")
+	appendStringFlag(o, flags.Password, "password")
+	appendStringFlag(o, flags.ClusterName, "cluster")
+	appendStringFlag(o, flags.AuthInfoName, "user")
+	//appendStringFlag(o, flags.Namespace, "namespace")
+	appendStringFlag(o, flags.Context, "context")
+	appendStringFlag(o, flags.APIServer, "server")
+	appendBoolFlag(o, flags.Insecure, "insecure-skip-tls-verify")
+	appendStringFlag(o, flags.CAFile, "certificate-authority")
+	appendStringFlag(o, flags.Timeout, "request-timeout")
+
+	return out
+}
+
+func appendStringFlag(out *[]string, in *string, flag string) {
+	if in != nil && *in != "" {
+		*out = append(*out, fmt.Sprintf("--%v=%v", flag, *in))
+	}
+}
+
+func appendBoolFlag(out *[]string, in *bool, flag string) {
+	if in != nil {
+		*out = append(*out, fmt.Sprintf("--%v=%v", flag, *in))
+	}
+}
+
+func appendStringArrayFlag(out *[]string, in *[]string, flag string) {
+	if in != nil && len(*in) > 0 {
+		*out = append(*out, fmt.Sprintf("--%v=%v", flag, strings.Join(*in, ",")))
+	}
+}
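For context, a minimal sketch of how these helpers compose when called from a subcommand; the namespace and pod name are illustrative values, not part of this PR, and kubectl is assumed to be on the PATH:

```go
package main

import (
	"fmt"

	"k8s.io/cli-runtime/pkg/genericclioptions"

	"k8s.io/ingress-nginx/cmd/plugin/kubectl"
)

func main() {
	// Parse the standard kubectl config flags (kubeconfig, context, ...).
	flags := genericclioptions.NewConfigFlags(true)

	// Shell out to `kubectl exec` and capture stdout; the pod name below
	// is hypothetical.
	out, err := kubectl.ExecToString(flags, []string{
		"exec", "-n", "ingress-nginx", "nginx-ingress-controller-abc123",
		"--", "cat", "/etc/nginx/nginx.conf",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```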
diff --git a/cmd/plugin/lints/deployment.go b/cmd/plugin/lints/deployment.go
new file mode 100644
index 0000000000..26e218d66f
--- /dev/null
+++ b/cmd/plugin/lints/deployment.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lints
+
+import (
+	"fmt"
+	"strings"
+
+	v1 "k8s.io/api/apps/v1"
+	kmeta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/ingress-nginx/cmd/plugin/util"
+)
+
+// DeploymentLint is a validation for a deployment
+type DeploymentLint struct {
+	message string
+	version string
+	issue   int
+	f       func(cmp v1.Deployment) bool
+}
+
+// Check returns true if the lint detects an issue
+func (lint DeploymentLint) Check(obj kmeta.Object) bool {
+	cmp := obj.(*v1.Deployment)
+	return lint.f(*cmp)
+}
+
+// Message is a description of the lint
+func (lint DeploymentLint) Message() string {
+	return lint.message
+}
+
+// Version is the ingress-nginx version the lint was added for, or the empty string
+func (lint DeploymentLint) Version() string {
+	return lint.version
+}
+
+// Link is a URL to the issue or PR explaining the lint
+func (lint DeploymentLint) Link() string {
+	if lint.issue > 0 {
+		return fmt.Sprintf("%v%v", util.IssuePrefix, lint.issue)
+	}
+
+	return ""
+}
+
+// GetDeploymentLints returns all of the lints for deployments
+func GetDeploymentLints() []DeploymentLint {
+	return []DeploymentLint{
+		removedFlag("sort-backends", 3655, "0.22.0"),
+		removedFlag("force-namespace-isolation", 3887, "0.24.0"),
+	}
+}
+
+func removedFlag(flag string, issueNumber int, version string) DeploymentLint {
+	return DeploymentLint{
+		message: fmt.Sprintf("Uses removed config flag --%v", flag),
+		issue:   issueNumber,
+		version: version,
+		f: func(dep v1.Deployment) bool {
+			if !isIngressNginxDeployment(dep) {
+				return false
+			}
+
+			args := getNginxArgs(dep)
+			for _, arg := range args {
+				if strings.HasPrefix(arg, fmt.Sprintf("--%v", flag)) {
+					return true
+				}
+			}
+
+			return false
+		},
+	}
+}
+
+func getNginxArgs(dep v1.Deployment) []string {
+	for _, container := range dep.Spec.Template.Spec.Containers {
+		if len(container.Args) > 0 && container.Args[0] == "/nginx-ingress-controller" {
+			return container.Args
+		}
+	}
+	return make([]string, 0)
+}
+
+func isIngressNginxDeployment(dep v1.Deployment) bool {
+	containers := dep.Spec.Template.Spec.Containers
+	for _, container := range containers {
+		if len(container.Args) > 0 && container.Args[0] == "/nginx-ingress-controller" {
+			return true
+		}
+	}
+	return false
+}
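To see how these pieces combine, a brief sketch of a lint firing end to end; the in-memory deployment below is invented for illustration and is not part of this PR:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/ingress-nginx/cmd/plugin/lints"
)

func main() {
	// A minimal deployment that still passes --sort-backends, which
	// GetDeploymentLints above flags as removed in 0.22.0.
	dep := appsv1.Deployment{}
	dep.Spec.Template.Spec.Containers = []apiv1.Container{{
		Args: []string{"/nginx-ingress-controller", "--sort-backends"},
	}}

	for _, lint := range lints.GetDeploymentLints() {
		if lint.Check(&dep) {
			fmt.Println(lint.Message()) // "Uses removed config flag --sort-backends"
		}
	}
}
```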
diff --git a/cmd/plugin/lints/ingress.go b/cmd/plugin/lints/ingress.go
new file mode 100644
index 0000000000..bf8a5ff3a4
--- /dev/null
+++ b/cmd/plugin/lints/ingress.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lints
+
+import (
+	"fmt"
+	"strings"
+
+	networking "k8s.io/api/networking/v1beta1"
+	kmeta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/ingress-nginx/cmd/plugin/util"
+)
+
+// IngressLint is a validation for an ingress
+type IngressLint struct {
+	message string
+	issue   int
+	version string
+	f       func(ing networking.Ingress) bool
+}
+
+// Check returns true if the lint detects an issue
+func (lint IngressLint) Check(obj kmeta.Object) bool {
+	ing := obj.(*networking.Ingress)
+	return lint.f(*ing)
+}
+
+// Message is a description of the lint
+func (lint IngressLint) Message() string {
+	return lint.message
+}
+
+// Link is a URL to the issue or PR explaining the lint
+func (lint IngressLint) Link() string {
+	if lint.issue > 0 {
+		return fmt.Sprintf("%v%v", util.IssuePrefix, lint.issue)
+	}
+
+	return ""
+}
+
+// Version is the ingress-nginx version the lint was added for, or the empty string
+func (lint IngressLint) Version() string {
+	return lint.version
+}
+
+// GetIngressLints returns all of the lints for ingresses
+func GetIngressLints() []IngressLint {
+	return []IngressLint{
+		removedAnnotation("add-base-url", 3174, "0.22.0"),
+		removedAnnotation("base-url-scheme", 3174, "0.22.0"),
+		removedAnnotation("session-cookie-hash", 3743, "0.24.0"),
+		{
+			message: "The rewrite-target annotation value does not reference a capture group",
+			issue:   3174,
+			version: "0.22.0",
+			f:       rewriteTargetWithoutCaptureGroup,
+		},
+		{
+			message: "Contains an annotation with the prefix 'nginx.org'. This is a prefix for https://github.com/nginxinc/kubernetes-ingress",
+			f:       annotationPrefixIsNginxOrg,
+		},
+		{
+			message: "Contains an annotation with the prefix 'nginx.com'. This is a prefix for https://github.com/nginxinc/kubernetes-ingress",
+			f:       annotationPrefixIsNginxCom,
+		},
+		{
+			message: "The x-forwarded-prefix annotation value is a boolean instead of a string",
+			issue:   3786,
+			version: "0.24.0",
+			f:       xForwardedPrefixIsBool,
+		},
+	}
+}
+
+func xForwardedPrefixIsBool(ing networking.Ingress) bool {
+	for name, val := range ing.Annotations {
+		if strings.HasSuffix(name, "/x-forwarded-prefix") && (val == "true" || val == "false") {
+			return true
+		}
+	}
+	return false
+}
+
+func annotationPrefixIsNginxCom(ing networking.Ingress) bool {
+	for name := range ing.Annotations {
+		if strings.HasPrefix(name, "nginx.com/") {
+			return true
+		}
+	}
+	return false
+}
+
+func annotationPrefixIsNginxOrg(ing networking.Ingress) bool {
+	for name := range ing.Annotations {
+		if strings.HasPrefix(name, "nginx.org/") {
+			return true
+		}
+	}
+	return false
+}
+
+func rewriteTargetWithoutCaptureGroup(ing networking.Ingress) bool {
+	for name, val := range ing.Annotations {
+		if strings.HasSuffix(name, "/rewrite-target") && !strings.Contains(val, "$1") {
+			return true
+		}
+	}
+	return false
+}
+
+func removedAnnotation(annotationName string, issueNumber int, version string) IngressLint {
+	return IngressLint{
+		message: fmt.Sprintf("Contains the removed %v annotation.", annotationName),
+		issue:   issueNumber,
+		version: version,
+		f: func(ing networking.Ingress) bool {
+			for annotation := range ing.Annotations {
+				if strings.HasSuffix(annotation, "/"+annotationName) {
+					return true
+				}
+			}
+			return false
+		},
+	}
+}
diff --git a/cmd/plugin/main.go b/cmd/plugin/main.go
new file mode 100644
index 0000000000..f3a809715f
--- /dev/null
+++ b/cmd/plugin/main.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + + //Just importing this is supposed to allow cloud authentication + // eg GCP, AWS, Azure ... + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/ingress-nginx/cmd/plugin/commands/backends" + "k8s.io/ingress-nginx/cmd/plugin/commands/certs" + "k8s.io/ingress-nginx/cmd/plugin/commands/conf" + "k8s.io/ingress-nginx/cmd/plugin/commands/exec" + "k8s.io/ingress-nginx/cmd/plugin/commands/general" + "k8s.io/ingress-nginx/cmd/plugin/commands/info" + "k8s.io/ingress-nginx/cmd/plugin/commands/ingresses" + "k8s.io/ingress-nginx/cmd/plugin/commands/lint" + "k8s.io/ingress-nginx/cmd/plugin/commands/logs" + "k8s.io/ingress-nginx/cmd/plugin/commands/ssh" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "ingress-nginx", + Short: "A kubectl plugin for inspecting your ingress-nginx deployments", + } + + // Respect some basic kubectl flags like --namespace + flags := genericclioptions.NewConfigFlags(true) + flags.AddFlags(rootCmd.PersistentFlags()) + + rootCmd.AddCommand(ingresses.CreateCommand(flags)) + rootCmd.AddCommand(conf.CreateCommand(flags)) + rootCmd.AddCommand(general.CreateCommand(flags)) + rootCmd.AddCommand(backends.CreateCommand(flags)) + rootCmd.AddCommand(info.CreateCommand(flags)) + rootCmd.AddCommand(certs.CreateCommand(flags)) + rootCmd.AddCommand(logs.CreateCommand(flags)) + rootCmd.AddCommand(exec.CreateCommand(flags)) + rootCmd.AddCommand(ssh.CreateCommand(flags)) + rootCmd.AddCommand(lint.CreateCommand(flags)) + + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/cmd/plugin/request/request.go b/cmd/plugin/request/request.go new file mode 100644 index 0000000000..8e836b9871 --- /dev/null +++ b/cmd/plugin/request/request.go @@ -0,0 +1,285 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package request
+
+import (
+	"fmt"
+
+	appsv1 "k8s.io/api/apps/v1"
+	apiv1 "k8s.io/api/core/v1"
+	networking "k8s.io/api/networking/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	typednetworking "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
+
+	"k8s.io/ingress-nginx/cmd/plugin/util"
+)
+
+// ChoosePod finds a pod either by deployment or by name
+func ChoosePod(flags *genericclioptions.ConfigFlags, podName string, deployment string) (apiv1.Pod, error) {
+	if podName != "" {
+		return GetNamedPod(flags, podName)
+	}
+
+	return GetDeploymentPod(flags, deployment)
+}
+
+// GetNamedPod finds a pod with the given name
+func GetNamedPod(flags *genericclioptions.ConfigFlags, name string) (apiv1.Pod, error) {
+	allPods, err := getPods(flags)
+	if err != nil {
+		return apiv1.Pod{}, err
+	}
+
+	for _, pod := range allPods {
+		if pod.Name == name {
+			return pod, nil
+		}
+	}
+
+	return apiv1.Pod{}, fmt.Errorf("Pod %v not found in namespace %v", name, util.GetNamespace(flags))
+}
+
+// GetDeploymentPod finds a pod from a given deployment
+func GetDeploymentPod(flags *genericclioptions.ConfigFlags, deployment string) (apiv1.Pod, error) {
+	pods, err := getDeploymentPods(flags, deployment)
+	if err != nil {
+		return apiv1.Pod{}, err
+	}
+
+	if len(pods) == 0 {
+		return apiv1.Pod{}, fmt.Errorf("No pods for deployment %v found in namespace %v", deployment, util.GetNamespace(flags))
+	}
+
+	return pods[0], nil
+}
+
+// GetDeployments returns an array of Deployments
+func GetDeployments(flags *genericclioptions.ConfigFlags, namespace string) ([]appsv1.Deployment, error) {
+	rawConfig, err := flags.ToRESTConfig()
+	if err != nil {
+		return make([]appsv1.Deployment, 0), err
+	}
+
+	api, err := appsv1client.NewForConfig(rawConfig)
+	if err != nil {
+		return make([]appsv1.Deployment, 0), err
+	}
+
+	deployments, err := api.Deployments(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return make([]appsv1.Deployment, 0), err
+	}
+
+	return deployments.Items, nil
+}
+
+// GetIngressDefinitions returns an array of Ingress resource definitions
+func GetIngressDefinitions(flags *genericclioptions.ConfigFlags, namespace string) ([]networking.Ingress, error) {
+	rawConfig, err := flags.ToRESTConfig()
+	if err != nil {
+		return make([]networking.Ingress, 0), err
+	}
+
+	api, err := typednetworking.NewForConfig(rawConfig)
+	if err != nil {
+		return make([]networking.Ingress, 0), err
+	}
+
+	ingresses, err := api.Ingresses(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return make([]networking.Ingress, 0), err
+	}
+
+	return ingresses.Items, nil
+}
+
+// GetNumEndpoints counts the number of endpoints for the service with the given name
+func GetNumEndpoints(flags *genericclioptions.ConfigFlags, namespace string, serviceName string) (*int, error) {
+	endpoints, err := GetEndpointsByName(flags, namespace, serviceName)
+	if err != nil {
+		return nil, err
+	}
+
+	if endpoints == nil {
+		return nil, nil
+	}
+
+	ret := 0
+	for _, subset := range endpoints.Subsets {
+		ret += len(subset.Addresses)
+	}
+	return &ret, nil
+}
+
+// GetEndpointsByName returns the endpoints for the service with the given name
+func GetEndpointsByName(flags *genericclioptions.ConfigFlags, namespace string, name string) (*apiv1.Endpoints, error) {
+	allEndpoints, err := getEndpoints(flags, namespace)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, endpoints := range allEndpoints {
+		if endpoints.Name == name {
+			return &endpoints, nil
+		}
+	}
+
+	return nil, nil
+}
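+
+// Usage sketch (not part of this file's API surface): a caller can resolve
+// endpoint counts the way the `ingresses` subcommand does, e.g.
+//
+//	flags := genericclioptions.NewConfigFlags(true)
+//	n, err := GetNumEndpoints(flags, "ingress-nginx", "my-service")
+//
+// where the namespace and service name are illustrative values. The
+// package-level cache below keeps repeated lookups within a single plugin
+// invocation from re-listing endpoints on every call.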
+
+var endpointsCache = make(map[string]*[]apiv1.Endpoints)
+
+func getEndpoints(flags *genericclioptions.ConfigFlags, namespace string) ([]apiv1.Endpoints, error) {
+	cachedEndpoints, ok := endpointsCache[namespace]
+	if ok {
+		return *cachedEndpoints, nil
+	}
+
+	if namespace != "" {
+		tryAllNamespacesEndpointsCache(flags)
+	}
+
+	cachedEndpoints = tryFilteringEndpointsFromAllNamespacesCache(flags, namespace)
+	if cachedEndpoints != nil {
+		return *cachedEndpoints, nil
+	}
+
+	rawConfig, err := flags.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	api, err := corev1.NewForConfig(rawConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	endpointsList, err := api.Endpoints(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	endpoints := endpointsList.Items
+
+	endpointsCache[namespace] = &endpoints
+	return endpoints, nil
+}
+
+func tryAllNamespacesEndpointsCache(flags *genericclioptions.ConfigFlags) {
+	_, ok := endpointsCache[""]
+	if !ok {
+		_, err := getEndpoints(flags, "")
+		if err != nil {
+			endpointsCache[""] = nil
+		}
+	}
+}
+
+func tryFilteringEndpointsFromAllNamespacesCache(flags *genericclioptions.ConfigFlags, namespace string) *[]apiv1.Endpoints {
+	allEndpoints := endpointsCache[""]
+	if allEndpoints != nil {
+		endpoints := make([]apiv1.Endpoints, 0)
+		for _, thisEndpoints := range *allEndpoints {
+			if thisEndpoints.Namespace == namespace {
+				endpoints = append(endpoints, thisEndpoints)
+			}
+		}
+		endpointsCache[namespace] = &endpoints
+		return &endpoints
+	}
+	return nil
+}
+
+// GetServiceByName finds and returns the service definition with the given name
+func GetServiceByName(flags *genericclioptions.ConfigFlags, name string, services *[]apiv1.Service) (apiv1.Service, error) {
+	if services == nil {
+		servicesArray, err := getServices(flags)
+		if err != nil {
+			return apiv1.Service{}, err
+		}
+		services = &servicesArray
+	}
+
+	for _, svc := range *services {
+		if svc.Name == name {
+			return svc, nil
+		}
+	}
+
+	return apiv1.Service{}, fmt.Errorf("Could not find service %v in namespace %v", name, util.GetNamespace(flags))
+}
+
+func getPods(flags *genericclioptions.ConfigFlags) ([]apiv1.Pod, error) {
+	namespace := util.GetNamespace(flags)
+
+	rawConfig, err := flags.ToRESTConfig()
+	if err != nil {
+		return make([]apiv1.Pod, 0), err
+	}
+
+	api, err := corev1.NewForConfig(rawConfig)
+	if err != nil {
+		return make([]apiv1.Pod, 0), err
+	}
+
+	pods, err := api.Pods(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return make([]apiv1.Pod, 0), err
+	}
+
+	return pods.Items, nil
+}
+
+func getDeploymentPods(flags *genericclioptions.ConfigFlags, deployment string) ([]apiv1.Pod, error) {
+	pods, err := getPods(flags)
+	if err != nil {
+		return make([]apiv1.Pod, 0), err
+	}
+
+	ingressPods := make([]apiv1.Pod, 0)
+	for _, pod := range pods {
+		if util.PodInDeployment(pod, deployment) {
+			ingressPods = append(ingressPods, pod)
+		}
+	}
+
+	return ingressPods, nil
+}
+
+func getServices(flags *genericclioptions.ConfigFlags) ([]apiv1.Service, error) {
+	namespace := util.GetNamespace(flags)
+
+	rawConfig, err := flags.ToRESTConfig()
+	if err != nil {
+		return make([]apiv1.Service, 0), err
+	}
+
+	api, err := corev1.NewForConfig(rawConfig)
+	if err != nil {
+		return make([]apiv1.Service, 0), err
+	}
+
+	services, err := api.Services(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return make([]apiv1.Service, 0), err
+	}
+
+	return services.Items
+}
diff --git a/cmd/plugin/util/util.go b/cmd/plugin/util/util.go
new file mode 100644
index 0000000000..1832fca016
--- /dev/null
+++ b/cmd/plugin/util/util.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/spf13/cobra"
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+)
+
+// The default deployment and service names for ingress-nginx
+const (
+	DefaultIngressDeploymentName = "nginx-ingress-controller"
+	DefaultIngressServiceName    = "ingress-nginx"
+)
+
+// IssuePrefix is the github url that we can append an issue number to in order to link to it
+const IssuePrefix = "https://github.com/kubernetes/ingress-nginx/issues/"
+
+var versionRegex = regexp.MustCompile(`(\d+)\.(\d+)\.(\d+).*`)
+
+// PrintError receives an error value and prints it if it exists
+func PrintError(e error) {
+	if e != nil {
+		fmt.Println(e)
+	}
+}
+
+func printWithError(s string, e error) {
+	if e != nil {
+		fmt.Println(e)
+	}
+	fmt.Print(s)
+}
+
+func printOrError(s string, e error) error {
+	if e != nil {
+		return e
+	}
+	fmt.Print(s)
+	return nil
+}
+
+// ParseVersionString returns the major, minor, and patch numbers of a version string
+func ParseVersionString(v string) (int, int, int, error) {
+	parts := versionRegex.FindStringSubmatch(v)
+
+	if len(parts) != 4 {
+		return 0, 0, 0, fmt.Errorf("Could not parse %v as a version string (like 0.20.3)", v)
+	}
+
+	major, _ := strconv.Atoi(parts[1])
+	minor, _ := strconv.Atoi(parts[2])
+	patch, _ := strconv.Atoi(parts[3])
+
+	return major, minor, patch, nil
+}
+
+// InVersionRangeInclusive checks that the middle version is between the other two versions
+func InVersionRangeInclusive(start, v, stop string) bool {
+	return !isVersionLessThan(v, start) && !isVersionLessThan(stop, v)
+}
+
+func isVersionLessThan(a, b string) bool {
+	aMajor, aMinor, aPatch, err := ParseVersionString(a)
+	if err != nil {
+		panic(err)
+	}
+
+	bMajor, bMinor, bPatch, err := ParseVersionString(b)
+	if err != nil {
+		panic(err)
+	}
+
+	if aMajor != bMajor {
+		return aMajor < bMajor
+	}
+
+	if aMinor != bMinor {
+		return aMinor < bMinor
+	}
+
+	return aPatch < bPatch
+}
+
+// PodInDeployment returns whether a pod is part of a deployment with the given name
+// a pod is considered to be in {deployment} if it is owned by a replicaset with a name of format {deployment}-otherchars
+func PodInDeployment(pod apiv1.Pod, deployment string) bool {
+	for _, owner := range pod.OwnerReferences {
+		if owner.Controller == nil || !*owner.Controller || owner.Kind != "ReplicaSet" {
+			continue
+		}
+
+		if strings.Count(owner.Name, "-") != strings.Count(deployment, "-")+1 {
+			continue
+		}
+
+		if strings.HasPrefix(owner.Name, deployment+"-") {
+			return true
+		}
+	}
+	return false
+}
+
+// AddPodFlag adds a --pod flag to a cobra command
+func AddPodFlag(cmd *cobra.Command) *string {
+	v := ""
+	cmd.Flags().StringVar(&v, "pod", "", 
"Query a particular ingress-nginx pod") + return &v +} + +// AddDeploymentFlag adds a --deployment flag to a cobra command +func AddDeploymentFlag(cmd *cobra.Command) *string { + v := "" + cmd.Flags().StringVar(&v, "deployment", DefaultIngressDeploymentName, "The name of the ingress-nginx deployment") + return &v +} + +// GetNamespace takes a set of kubectl flag values and returns the namespace we should be operating in +func GetNamespace(flags *genericclioptions.ConfigFlags) string { + namespace, _, err := flags.ToRawKubeConfigLoader().Namespace() + if err != nil || len(namespace) == 0 { + namespace = apiv1.NamespaceDefault + } + return namespace +} diff --git a/code-of-conduct.md b/code-of-conduct.md index 622d4c1d1c..0d15c00cf3 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,5 +1,3 @@ -## Kubernetes Community Code of Conduct +# Kubernetes Community Code of Conduct -Kubernetes follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]() +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/deploy/README.md b/deploy/README.md index 8f7c22c752..b365de3336 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -1,267 +1,3 @@ -# Installation Guide +# Deployment documentation moved! -## Contents - -- [Mandatory commands](#mandatory-commands) -- [Install without RBAC roles](#install-without-rbac-roles) -- [Install with RBAC roles](#install-with-rbac-roles) -- [Custom Provider](#custom-provider) - - [minikube](#minikube) - - [AWS](#aws) - - [GCE - GKE](#gce-gke) - - [Azure](#azure) - - [Baremetal](#baremetal) -- [Using Helm](#using-helm) -- [Verify installation](#verify-installation) -- [Detect installed version](#detect-installed-version) -- [Deploying the config-map](#deploying-the-config-map) - -## Generic Deployment - -The following resources are required for a generic deployment. - -### Mandatory commands - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml \ - | kubectl apply -f - - -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml \ - | kubectl apply -f - - -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \ - | kubectl apply -f - - -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml \ - | kubectl apply -f - - -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml \ - | kubectl apply -f - -``` - -### Install without RBAC roles - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/without-rbac.yaml \ - | kubectl apply -f - -``` - -### Install with RBAC roles - -Please check the [RBAC](rbac.md) document. - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml \ - | kubectl apply -f - - -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml \ - | kubectl apply -f - -``` - -## Custom Service Provider Deployment - -There are cloud provider specific yaml files. - -### minikube - -For standard usage: - -```console -minikube addons enable ingress -``` - -For development: - -1. Disable the ingress addon: - -```console -$ minikube addons disable ingress -``` - -2. 
Use the [docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md) -3. [Build the image](../docs/development.md) -4. Perform [Mandatory commands](#mandatory-commands) -5. Install the `nginx-ingress-controller` deployment [without RBAC roles](#install-without-rbac-roles) or [with RBAC roles](#install-with-rbac-roles) -6. Edit the `nginx-ingress-controller` deployment to use your custom image. Local images can be seen by performing `docker images`. - -```console -$ kubectl edit deployment nginx-ingress-controller -n ingress-nginx -``` - -edit the following section: - -```yaml -image: : -imagePullPolicy: IfNotPresent -name: nginx-ingress-controller -``` - -7. Confirm the `nginx-ingress-controller` deployment exists: - -```console -$ kubectl get pods -n ingress-nginx -NAME READY STATUS RESTARTS AGE -default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s -nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s -``` - -### AWS - -In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`. -This setup requires to choose in which layer (L4 or L7) we want to configure the ELB: - -- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443. -- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB - -Patch the nginx ingress controller deployment to add the flag `--publish-service` - -```console -kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \ - --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/patch-deployment.yaml)" -``` - -For L4: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml -``` - -For L7: - -Change line of the file `provider/aws/service-l7.yaml` replacing the dummy id with a valid one `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"` -Then execute: - -```console -kubectl apply -f provider/aws/service-l7.yaml -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml -``` - -This example creates an ELB with just two listeners, one in port 80 and another in port 443 - -![Listeners](../docs/images/elb-l7-listener.png) - -If the ingress controller uses RBAC run: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml -``` - -If not run: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml -``` - -### GCE - GKE - -Patch the nginx ingress controller deployment to add the flag `--publish-service` - -```console -kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \ - --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/patch-deployment.yaml)" -``` - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/gce-gke/service.yaml \ - | kubectl apply -f - -``` - -If the ingress controller uses RBAC run: - -```console -kubectl apply -f 
https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml -``` - -If not run: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml -``` - -**Important Note:** proxy protocol is not supported in GCE/GKE - -### Azure - -Patch the nginx ingress controller deployment to add the flag `--publish-service` - -```console -kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \ - --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/patch-deployment.yaml)" -``` - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/azure/service.yaml \ - | kubectl apply -f - -``` - -If the ingress controller uses RBAC run: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml -``` - -If not run: - -```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml -``` - -**Important Note:** proxy protocol is not supported in GCE/GKE - -### Baremetal - -Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport): - -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/baremetal/service-nodeport.yaml \ - | kubectl apply -f - -``` - -## Using Helm - -NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [stable/nginx](https://github.com/kubernetes/charts/tree/master/stable/nginx-ingress) from the official charts repository. -To install the chart with the release name `my-nginx`: - -```console -helm install stable/nginx-ingress --name my-nginx -``` - -## Verify installation - -To check if the ingress controller pods have started, run the following command: - -```console -kubectl get pods --all-namespaces -l app=ingress-nginx --watch -``` - -Once the operator pods are running, you can cancel the above command by typing `Ctrl+C`. -Now, you are ready to create your first ingress. - -## Detect installed version - -To detect which version of the ingress controller is running, exec into the pod and run `nginx-ingress-controller version` command. - -```console -POD_NAMESPACE=ingress-nginx -POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=ingress-nginx -o jsonpath={.items[0].metadata.name}) -kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version -``` - -## Deploying the config-map - -A config map can be used to configure system components for the nginx-controller. In order to begin using a config-map -make sure it has been created and is being used in the deployment. - -It is created as seen in the [Mandatory Commands](#mandatory-commands) section above. -```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \ - | kubectl apply -f - -``` - -and is setup to be used in the deployment [without-rbac](without-rbac.yaml) or [with-rbac](with-rbac.yaml) with the following line: -```yaml -- --configmap=$(POD_NAMESPACE)/nginx-configuration -``` - -For information on using the config-map, see its [user-guide](../docs/user-guide/configmap.md). \ No newline at end of file +See [/docs/deploy](../docs/deploy/index.md). 
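The overlays introduced below all build on the shared `deploy/cloud-generic` kustomize base. As a usage sketch (assuming a checkout of this repository and a kustomize-aware kubectl, v1.14 or newer), an overlay such as the AWS L4 one can be applied in a single step:

```console
# Build the overlay (cloud-generic base + service-l4.yaml patch +
# generated ConfigMap literals) and apply the result to the cluster.
kubectl apply -k deploy/aws/l4
```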
diff --git a/deploy/aws/l4/kustomization.yaml b/deploy/aws/l4/kustomization.yaml new file mode 100644 index 0000000000..a17bd9156e --- /dev/null +++ b/deploy/aws/l4/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../../cloud-generic +patchesStrategicMerge: +- service-l4.yaml +configMapGenerator: +- name: nginx-configuration + behavior: merge + literals: + - use-proxy-protocol=true diff --git a/deploy/aws/l4/service-l4.yaml b/deploy/aws/l4/service-l4.yaml new file mode 100644 index 0000000000..3d96424917 --- /dev/null +++ b/deploy/aws/l4/service-l4.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + annotations: + # Enable PROXY protocol + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" + # Ensure the ELB idle timeout is less than nginx keep-alive timeout. By default, + # NGINX keep-alive is set to 75s. If using WebSockets, the value will need to be + # increased to '3600' to avoid any potential issues. + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" +spec: + externalTrafficPolicy: Cluster diff --git a/deploy/aws/l7/kustomization.yaml b/deploy/aws/l7/kustomization.yaml new file mode 100644 index 0000000000..35dbc67e47 --- /dev/null +++ b/deploy/aws/l7/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../../cloud-generic +patchesStrategicMerge: +- service-l7.yaml +configMapGenerator: +- name: nginx-configuration + behavior: merge + literals: + - use-proxy-protocol=false + - use-forwarded-headers=true + - proxy-real-ip-cidr=0.0.0.0/0 # restrict this to the IP addresses of ELB diff --git a/deploy/aws/l7/service-l7.yaml b/deploy/aws/l7/service-l7.yaml new file mode 100644 index 0000000000..b3b0b64d81 --- /dev/null +++ b/deploy/aws/l7/service-l7.yaml @@ -0,0 +1,17 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + annotations: + # replace with the correct value of the generated certificate in the AWS console + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" + # the backend instances are HTTP + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + # Map port 443 + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + # Ensure the ELB idle timeout is less than nginx keep-alive timeout. By default, + # NGINX keep-alive is set to 75s. If using WebSockets, the value will need to be + # increased to '3600' to avoid any potential issues. + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" +spec: + externalTrafficPolicy: Cluster diff --git a/deploy/aws/nlb/kustomization.yaml b/deploy/aws/nlb/kustomization.yaml new file mode 100644 index 0000000000..cfffbefc42 --- /dev/null +++ b/deploy/aws/nlb/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../../cloud-generic +patchesStrategicMerge: +- service-nlb.yaml diff --git a/deploy/aws/nlb/service-nlb.yaml b/deploy/aws/nlb/service-nlb.yaml new file mode 100644 index 0000000000..a0438c207e --- /dev/null +++ b/deploy/aws/nlb/service-nlb.yaml @@ -0,0 +1,7 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + annotations: + # by default the type is elb (classic load balancer). 
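+    # Setting it to "nlb" below asks the AWS cloud provider to provision a Network Load Balancer instead.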
+ service.beta.kubernetes.io/aws-load-balancer-type: nlb diff --git a/deploy/baremetal/kustomization.yaml b/deploy/baremetal/kustomization.yaml new file mode 100644 index 0000000000..3512703b83 --- /dev/null +++ b/deploy/baremetal/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../cloud-generic +patchesStrategicMerge: +- service-nodeport.yaml diff --git a/deploy/baremetal/service-nodeport.yaml b/deploy/baremetal/service-nodeport.yaml new file mode 100644 index 0000000000..0aadea1576 --- /dev/null +++ b/deploy/baremetal/service-nodeport.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + externalTrafficPolicy: Cluster diff --git a/deploy/cloud-generic/deployment.yaml b/deploy/cloud-generic/deployment.yaml new file mode 100644 index 0000000000..71de9202b0 --- /dev/null +++ b/deploy/cloud-generic/deployment.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress-controller +spec: + replicas: 1 + template: + metadata: + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: nginx-ingress-serviceaccount + containers: + - name: nginx-ingress-controller + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/$(NGINX_CONFIGMAP_NAME) + - --tcp-services-configmap=$(POD_NAMESPACE)/$(TCP_CONFIGMAP_NAME) + - --udp-services-configmap=$(POD_NAMESPACE)/$(UDP_CONFIGMAP_NAME) + - --publish-service=$(POD_NAMESPACE)/$(SERVICE_NAME) + - --annotations-prefix=nginx.ingress.kubernetes.io + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 diff --git a/deploy/cloud-generic/kustomization.yaml b/deploy/cloud-generic/kustomization.yaml new file mode 100644 index 0000000000..c2b03ddbf6 --- /dev/null +++ b/deploy/cloud-generic/kustomization.yaml @@ -0,0 +1,50 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: ingress-nginx +commonLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +resources: +- deployment.yaml +- role-binding.yaml +- role.yaml +- service-account.yaml +- service.yaml +images: +- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + newTag: 0.24.1 +vars: +- fieldref: + fieldPath: metadata.name + name: NGINX_CONFIGMAP_NAME + objref: + apiVersion: v1 + kind: ConfigMap + name: nginx-configuration +- fieldref: + fieldPath: metadata.name + name: TCP_CONFIGMAP_NAME + objref: + apiVersion: v1 + kind: ConfigMap + name: tcp-services +- fieldref: + fieldPath: metadata.name + name: UDP_CONFIGMAP_NAME + objref: + apiVersion: v1 + kind: 
ConfigMap + name: udp-services +- fieldref: + fieldPath: metadata.name + name: SERVICE_NAME + objref: + apiVersion: v1 + kind: Service + name: ingress-nginx +configMapGenerator: +- name: nginx-configuration +- name: tcp-services +- name: udp-services +generatorOptions: + disableNameSuffixHash: true diff --git a/deploy/cloud-generic/role-binding.yaml b/deploy/cloud-generic/role-binding.yaml new file mode 100644 index 0000000000..228588e6dc --- /dev/null +++ b/deploy/cloud-generic/role-binding.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount diff --git a/deploy/cloud-generic/role.yaml b/deploy/cloud-generic/role.yaml new file mode 100644 index 0000000000..936b63d729 --- /dev/null +++ b/deploy/cloud-generic/role.yaml @@ -0,0 +1,39 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: nginx-ingress-role +rules: + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + - "ingress-controller-leader-nginx" + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get diff --git a/deploy/cloud-generic/service-account.yaml b/deploy/cloud-generic/service-account.yaml new file mode 100644 index 0000000000..a52fb8ac85 --- /dev/null +++ b/deploy/cloud-generic/service-account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount diff --git a/deploy/cloud-generic/service.yaml b/deploy/cloud-generic/service.yaml new file mode 100644 index 0000000000..3a3a3e2a82 --- /dev/null +++ b/deploy/cloud-generic/service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx +spec: + externalTrafficPolicy: Local + type: LoadBalancer + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: https diff --git a/deploy/cluster-wide/cluster-role-binding.yaml b/deploy/cluster-wide/cluster-role-binding.yaml new file mode 100644 index 0000000000..7293fb37d7 --- /dev/null +++ b/deploy/cluster-wide/cluster-role-binding.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount diff --git a/deploy/cluster-wide/cluster-role.yaml b/deploy/cluster-wide/cluster-role.yaml new file mode 100644 index 0000000000..da8ecafbf3 --- /dev/null +++ b/deploy/cluster-wide/cluster-role.yaml @@ -0,0 +1,65 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + 
verbs: + - create + - patch + - apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - ingresses/status + verbs: + - update diff --git a/deploy/cluster-wide/kustomization.yaml b/deploy/cluster-wide/kustomization.yaml new file mode 100644 index 0000000000..aeef6ed6b3 --- /dev/null +++ b/deploy/cluster-wide/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +commonLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +resources: +- cluster-role.yaml +- cluster-role-binding.yaml diff --git a/deploy/configmap.yaml b/deploy/configmap.yaml deleted file mode 100644 index 08e91017ea..0000000000 --- a/deploy/configmap.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx diff --git a/deploy/default-backend.yaml b/deploy/default-backend.yaml deleted file mode 100644 index 64f6f58ad0..0000000000 --- a/deploy/default-backend.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-http-backend - labels: - app: default-http-backend - namespace: ingress-nginx -spec: - replicas: 1 - template: - metadata: - labels: - app: default-http-backend - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-http-backend - # Any image is permissable as long as: - # 1. It serves a 404 page at / - # 2. It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.4 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- - -apiVersion: v1 -kind: Service -metadata: - name: default-http-backend - namespace: ingress-nginx - labels: - app: default-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: default-http-backend diff --git a/deploy/grafana/dashboards/README.md b/deploy/grafana/dashboards/README.md new file mode 100644 index 0000000000..bfb428df06 --- /dev/null +++ b/deploy/grafana/dashboards/README.md @@ -0,0 +1,19 @@ +# Grafana Dashboards +Ingress-nginx exposes a rich collection of Prometheus metrics. If Prometheus and Grafana are installed in your cluster, Prometheus will already be scraping this data thanks to the `prometheus.io/scrape` annotation on the controller deployment. + +This folder contains a dashboard that you can import: + +![Dashboard](screenshot.png) + +## Features + + - Ability to filter by Namespace, Controller Class and Controller + - Visibility of request volume, connections, success rates, config reloads and configs out of sync
+ - Network I/O pressure, memory and CPU use + - Ingress P50, P90 and P99 percentile response times with IN/OUT throughput + - SSL certificate expiry + - Annotation overlays showing when config reloads happened + +## Requirements + + - **Grafana v5.2.0** (or newer) diff --git a/deploy/grafana/dashboards/nginx.json b/deploy/grafana/dashboards/nginx.json new file mode 100644 index 0000000000..35b2c4d3af --- /dev/null +++ b/deploy/grafana/dashboards/nginx.json @@ -0,0 +1,1453 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": "${DS_PROMETHEUS}", + "enable": true, + "expr": "sum(changes(nginx_ingress_controller_config_last_reload_successful_timestamp_seconds{instance!=\"unknown\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[30s])) by (controller_class)", + "hide": false, + "iconColor": "rgba(255, 96, 96, 1)", + "limit": 100, + "name": "Config Reloads", + "showIn": 0, + "step": "30s", + "tagKeys": "controller_class", + "tags": [], + "titleFormat": "Config Reloaded", + "type": "tags" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "iteration": 1534359654832, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 20, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m])), 0.001)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Controller Request Volume", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], +
"datasource": "${DS_PROMETHEUS}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 82, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Controller Connections", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 80, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 21, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",status!~\"[4-5].*\"}[2m])) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "95, 99, 99.5", + "title": "Controller Success Rate (non-4|5xx responses)", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 18, + "y": 0 + }, 
+ "id": 81, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(nginx_ingress_controller_success{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Config Reloads", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 83, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(nginx_ingress_controller_config_last_reload_successful{controller_pod=~\"$controller\",controller_namespace=~\"$namespace\"} == 0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Last Config Failed", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 3 + }, + "height": "200px", + "id": 86, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 300, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "repeatDirection": "h", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress), 0.001)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "metric": "network", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Ingress Request Volume", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00", + "max - prometheus": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 3 + }, + "id": 87, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 300, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\",status!~\"[4-5].*\"}[2m])) by (ingress) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Ingress Success Rate (non-4|5xx responses)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 10 + }, + "height": "200px", + "id": 32, + 
"isNew": true, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (nginx_ingress_controller_request_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "expr": "- sum (irate (nginx_ingress_controller_response_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00", + "max - prometheus": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 10 + }, + "id": 77, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(nginx_ingress_controller_nginx_process_resident_memory_bytes{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}) ", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "nginx", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Average Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", 
+ "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 3, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 10 + }, + "height": "", + "id": 79, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sort": null, + "sortDesc": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (nginx_ingress_controller_nginx_process_cpu_seconds_total{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m])) ", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "nginx", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Average CPU Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "${DS_PROMETHEUS}", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hideTimeOverride": false, + "id": 75, + "links": [], + "pageSize": 7, + "repeat": null, + "repeatDirection": "h", + "scroll": true, + "showHeader": true, + "sort": { + "col": 1, + "desc": true + }, + "styles": [ + { + "alias": "Ingress", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "ingress", + "preserveFormat": false, + "sanitize": false, + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #A", + "thresholds": [ + "" + ], + "type": "number", + "unit": "ops" + }, + { + "alias": "Errors", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "ops" + }, + { + "alias": "P50 Latency", + "colorMode": null, + 
"colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "P90 Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "P99 Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "IN", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #F", + "thresholds": [ + "" + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Time", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "OUT", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "Bps" + } + ], + "targets": [ + { + "expr": "histogram_quantile(0.50, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ destination_service }}", + "refId": "E" + }, + { + "expr": "sum(irate(nginx_ingress_controller_request_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "F" + }, + { + "expr": 
"sum(irate(nginx_ingress_controller_response_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "G" + } + ], + "timeFrom": null, + "title": "Ingress Percentile Response Times and Transfer Rates", + "transform": "table", + "transparent": false, + "type": "table" + }, + { + "columns": [ + { + "text": "Current", + "value": "current" + } + ], + "datasource": "${DS_PROMETHEUS}", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "height": "1024", + "id": 85, + "links": [], + "pageSize": 7, + "scroll": true, + "showHeader": true, + "sort": { + "col": 1, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "date" + }, + { + "alias": "TTL", + "colorMode": "cell", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Current", + "thresholds": [ + "0", + "691200" + ], + "type": "number", + "unit": "s" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "avg(nginx_ingress_controller_ssl_expire_time_seconds{kubernetes_pod_name=~\"$controller\",namespace=~\"$namespace\",ingress=~\"$ingress\"}) by (host) - time()", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ host }}", + "metric": "gke_letsencrypt_cert_expiration", + "refId": "A", + "step": 1 + } + ], + "title": "Ingress Certificate Expiry", + "transform": "timeseries_aggregations", + "type": "table" + } + ], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "nginx" + ], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash, controller_namespace)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Controller Class", + "multi": false, + "name": "controller_class", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\"}, controller_class) ", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Controller", + "multi": false, + "name": "controller", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\",controller_class=~\"$controller_class\"}, controller_pod) ", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + 
"tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "tags": [], + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Ingress", + "multi": false, + "name": "ingress", + "options": [], + "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\",controller=~\"$controller\"}, ingress) ", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "2m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "NGINX Ingress controller", + "uid": "nginx", + "version": 1 +} diff --git a/deploy/grafana/dashboards/screenshot.png b/deploy/grafana/dashboards/screenshot.png new file mode 100644 index 0000000000..bc5ad3b199 Binary files /dev/null and b/deploy/grafana/dashboards/screenshot.png differ diff --git a/deploy/grafana/deployment.yaml b/deploy/grafana/deployment.yaml new file mode 100644 index 0000000000..765b2f64fb --- /dev/null +++ b/deploy/grafana/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana + namespace: ingress-nginx +spec: + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + spec: + containers: + - image: grafana/grafana + name: grafana + ports: + - containerPort: 3000 + protocol: TCP + resources: + limits: + cpu: 500m + memory: 2500Mi + requests: + cpu: 100m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: data + restartPolicy: Always + volumes: + - emptyDir: {} + name: data diff --git a/deploy/grafana/kustomization.yaml b/deploy/grafana/kustomization.yaml new file mode 100644 index 0000000000..2069c1a7a9 --- /dev/null +++ b/deploy/grafana/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: ingress-nginx +commonLabels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: ingress-nginx +resources: +- deployment.yaml +- service.yaml +images: +- name: grafana/grafana + newTag: 6.1.6 diff --git a/deploy/grafana/service.yaml b/deploy/grafana/service.yaml new file mode 100644 index 0000000000..16d69ac7a8 --- /dev/null +++ b/deploy/grafana/service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: grafana +spec: + ports: + - port: 3000 + protocol: TCP + targetPort: 3000 + type: NodePort diff --git a/deploy/minikube/kustomization.yaml b/deploy/minikube/kustomization.yaml new file mode 100644 index 0000000000..34ea278ad0 --- /dev/null +++ b/deploy/minikube/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: ingress-nginx +bases: +- ../baremetal +- ../cluster-wide +images: +- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + newName: ingress-controller/nginx-ingress-controller + newTag: dev diff --git a/deploy/namespace.yaml b/deploy/namespace.yaml deleted file mode 100644 index 6878f0be88..0000000000 --- a/deploy/namespace.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: ingress-nginx diff --git a/deploy/patch-deployment.yaml 
b/deploy/patch-deployment.yaml deleted file mode 100644 index 5959c83b10..0000000000 --- a/deploy/patch-deployment.yaml +++ /dev/null @@ -1 +0,0 @@ -[{'op': 'replace', 'path': '/spec/template/spec/containers/0/args', 'value':['/nginx-ingress-controller','--default-backend-service=$(POD_NAMESPACE)/default-http-backend','--configmap=$(POD_NAMESPACE)/nginx-configuration','--tcp-services-configmap=$(POD_NAMESPACE)/tcp-services','--udp-services-configmap=$(POD_NAMESPACE)/udp-services','--annotations-prefix=nginx.ingress.kubernetes.io','--publish-service=$(POD_NAMESPACE)/ingress-nginx']}] \ No newline at end of file diff --git a/deploy/prometheus/deployment.yaml b/deploy/prometheus/deployment.yaml new file mode 100644 index 0000000000..2b15c686b2 --- /dev/null +++ b/deploy/prometheus/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus-server +spec: + replicas: 1 + template: + spec: + serviceAccountName: prometheus-server + containers: + - name: prometheus + image: prom/prometheus + args: + - "--config.file=/etc/prometheus/prometheus.yaml" + - "--storage.tsdb.path=/prometheus/" + ports: + - containerPort: 9090 + volumeMounts: + - name: prometheus-config-volume + mountPath: /etc/prometheus/ + - name: prometheus-storage-volume + mountPath: /prometheus/ + volumes: + - name: prometheus-config-volume + configMap: + name: prometheus-configuration + - name: prometheus-storage-volume + emptyDir: {} diff --git a/deploy/prometheus/kustomization.yaml b/deploy/prometheus/kustomization.yaml new file mode 100644 index 0000000000..2d6e0a7e26 --- /dev/null +++ b/deploy/prometheus/kustomization.yaml @@ -0,0 +1,19 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: ingress-nginx +commonLabels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: ingress-nginx +resources: +- role.yaml +- service-account.yaml +- role-binding.yaml +- deployment.yaml +- service.yaml +images: +- name: prom/prometheus + newTag: v2.3.2 +configMapGenerator: +- name: prometheus-configuration + files: + - prometheus.yaml diff --git a/deploy/prometheus/prometheus.yaml b/deploy/prometheus/prometheus.yaml new file mode 100644 index 0000000000..fe8cefe961 --- /dev/null +++ b/deploy/prometheus/prometheus.yaml @@ -0,0 +1,29 @@ +global: + scrape_interval: 10s +scrape_configs: +- job_name: 'ingress-nginx-endpoints' + kubernetes_sd_configs: + - role: pod + namespaces: + names: + - ingress-nginx + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
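+  # the next two rules rewrite __metrics_path__ and __address__ from the pod's prometheus.io/path and prometheus.io/port annotations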
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - source_labels: [__meta_kubernetes_service_name] + regex: prometheus-server + action: drop diff --git a/deploy/prometheus/role-binding.yaml b/deploy/prometheus/role-binding.yaml new file mode 100644 index 0000000000..b73b94f3a9 --- /dev/null +++ b/deploy/prometheus/role-binding.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-server +subjects: + - kind: ServiceAccount + name: prometheus-server diff --git a/deploy/prometheus/role.yaml b/deploy/prometheus/role.yaml new file mode 100644 index 0000000000..0328f80baf --- /dev/null +++ b/deploy/prometheus/role.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-server +rules: + - apiGroups: [""] + resources: + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] diff --git a/deploy/prometheus/service-account.yaml b/deploy/prometheus/service-account.yaml new file mode 100644 index 0000000000..3e2c9a7700 --- /dev/null +++ b/deploy/prometheus/service-account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus-server diff --git a/deploy/prometheus/service.yaml b/deploy/prometheus/service.yaml new file mode 100644 index 0000000000..9a2de8ce44 --- /dev/null +++ b/deploy/prometheus/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: prometheus-server +spec: + type: NodePort + ports: + - port: 9090 + targetPort: 9090 diff --git a/deploy/provider/aws/patch-configmap-l4.yaml b/deploy/provider/aws/patch-configmap-l4.yaml deleted file mode 100644 index 18805a5354..0000000000 --- a/deploy/provider/aws/patch-configmap-l4.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx -data: - use-proxy-protocol: "true" diff --git a/deploy/provider/aws/patch-configmap-l7.yaml b/deploy/provider/aws/patch-configmap-l7.yaml deleted file mode 100644 index 394f3962c9..0000000000 --- a/deploy/provider/aws/patch-configmap-l7.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx -data: - use-proxy-protocol: "false" diff --git a/deploy/provider/aws/service-l4.yaml b/deploy/provider/aws/service-l4.yaml deleted file mode 100644 index c22f579278..0000000000 --- a/deploy/provider/aws/service-l4.yaml +++ /dev/null @@ -1,23 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app: ingress-nginx - annotations: - # Enable PROXY protocol - service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*' - # Increase the ELB idle timeout to avoid issues with WebSockets or Server-Sent Events. 
- service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' -spec: - type: LoadBalancer - selector: - app: ingress-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: https diff --git a/deploy/provider/aws/service-l7.yaml b/deploy/provider/aws/service-l7.yaml deleted file mode 100644 index 404c2c4afd..0000000000 --- a/deploy/provider/aws/service-l7.yaml +++ /dev/null @@ -1,27 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app: ingress-nginx - annotations: - # replace with the correct value of the generated certifcate in the AWS console - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" - # the backend instances are HTTP - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - # Map port 443 - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" - # Increase the ELB idle timeout to avoid issues with WebSockets or Server-Sent Events. - service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' -spec: - type: LoadBalancer - selector: - app: ingress-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: http diff --git a/deploy/provider/azure/service.yaml b/deploy/provider/azure/service.yaml deleted file mode 100644 index 8d2f715051..0000000000 --- a/deploy/provider/azure/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app: ingress-nginx -spec: - externalTrafficPolicy: Local - type: LoadBalancer - selector: - app: ingress-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: https diff --git a/deploy/provider/baremetal/service-nodeport.yaml b/deploy/provider/baremetal/service-nodeport.yaml deleted file mode 100644 index a00f2453bd..0000000000 --- a/deploy/provider/baremetal/service-nodeport.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: ingress-nginx -spec: - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - selector: - app: ingress-nginx diff --git a/deploy/provider/gce-gke/service.yaml b/deploy/provider/gce-gke/service.yaml deleted file mode 100644 index 8d2f715051..0000000000 --- a/deploy/provider/gce-gke/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app: ingress-nginx -spec: - externalTrafficPolicy: Local - type: LoadBalancer - selector: - app: ingress-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: https diff --git a/deploy/provider/patch-service-with-rbac.yaml b/deploy/provider/patch-service-with-rbac.yaml deleted file mode 100644 index 44919d2e9a..0000000000 --- a/deploy/provider/patch-service-with-rbac.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx -spec: - replicas: 1 - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - spec: - serviceAccountName: nginx-ingress-serviceaccount - containers: - - name: nginx-ingress-controller - image: 
quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --publish-service=$(POD_NAMESPACE)/ingress-nginx - - --annotations-prefix=nginx.ingress.kubernetes.io - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 diff --git a/deploy/provider/patch-service-without-rbac.yaml b/deploy/provider/patch-service-without-rbac.yaml deleted file mode 100644 index f0938c944a..0000000000 --- a/deploy/provider/patch-service-without-rbac.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx -spec: - replicas: 1 - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - spec: - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --publish-service=$(POD_NAMESPACE)/ingress-nginx - - --annotations-prefix=nginx.ingress.kubernetes.io - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 diff --git a/deploy/static/configmap.yaml b/deploy/static/configmap.yaml new file mode 100644 index 0000000000..436b660a94 --- /dev/null +++ b/deploy/static/configmap.yaml @@ -0,0 +1,30 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tcp-services + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: udp-services + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- diff --git a/deploy/static/mandatory.yaml b/deploy/static/mandatory.yaml new file mode 100644 index 0000000000..921bc482d7 --- /dev/null +++ b/deploy/static/mandatory.yaml @@ -0,0 +1,265 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tcp-services + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: udp-services + 
namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: nginx-ingress-role + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + - "ingress-controller-leader-nginx" + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress-controller + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: nginx-ingress-serviceaccount + containers: + - name: nginx-ingress-controller + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - 
--tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --publish-service=$(POD_NAMESPACE)/ingress-nginx + - --annotations-prefix=nginx.ingress.kubernetes.io + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + +--- diff --git a/deploy/static/namespace.yaml b/deploy/static/namespace.yaml new file mode 100644 index 0000000000..9196d6d16d --- /dev/null +++ b/deploy/static/namespace.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- + diff --git a/deploy/static/provider/aws/patch-configmap-l4.yaml b/deploy/static/provider/aws/patch-configmap-l4.yaml new file mode 100644 index 0000000000..1d612289fb --- /dev/null +++ b/deploy/static/provider/aws/patch-configmap-l4.yaml @@ -0,0 +1,10 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +data: + use-proxy-protocol: "true" diff --git a/deploy/static/provider/aws/patch-configmap-l7.yaml b/deploy/static/provider/aws/patch-configmap-l7.yaml new file mode 100644 index 0000000000..b1bcd2a971 --- /dev/null +++ b/deploy/static/provider/aws/patch-configmap-l7.yaml @@ -0,0 +1,14 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +data: + use-proxy-protocol: "false" + use-forwarded-headers: "true" + proxy-real-ip-cidr: "0.0.0.0/0" # restrict this to the IP addresses of ELB +--- + diff --git a/deploy/static/provider/aws/service-l4.yaml b/deploy/static/provider/aws/service-l4.yaml new file mode 100644 index 0000000000..893b5a03d6 --- /dev/null +++ b/deploy/static/provider/aws/service-l4.yaml @@ -0,0 +1,30 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + # Enable PROXY protocol + service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" + # Ensure the ELB idle timeout is less than nginx keep-alive timeout. By default, + # NGINX keep-alive is set to 75s. If using WebSockets, the value will need to be + # increased to '3600' to avoid any potential issues. 
+ service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" +spec: + type: LoadBalancer + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: https + +--- + diff --git a/deploy/static/provider/aws/service-l7.yaml b/deploy/static/provider/aws/service-l7.yaml new file mode 100644 index 0000000000..6616108a23 --- /dev/null +++ b/deploy/static/provider/aws/service-l7.yaml @@ -0,0 +1,34 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + # replace with the correct value of the generated certificate in the AWS console + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" + # the backend instances are HTTP + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + # Map port 443 + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + # Ensure the ELB idle timeout is less than nginx keep-alive timeout. By default, + # NGINX keep-alive is set to 75s. If using WebSockets, the value will need to be + # increased to '3600' to avoid any potential issues. + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" +spec: + type: LoadBalancer + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: http + +--- + diff --git a/deploy/static/provider/aws/service-nlb.yaml b/deploy/static/provider/aws/service-nlb.yaml new file mode 100644 index 0000000000..244460b6d1 --- /dev/null +++ b/deploy/static/provider/aws/service-nlb.yaml @@ -0,0 +1,28 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + # by default the type is elb (classic load balancer). + service.beta.kubernetes.io/aws-load-balancer-type: nlb +spec: + # this setting is to make sure the source IP address is preserved. 
+ externalTrafficPolicy: Local + type: LoadBalancer + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: https + +--- + diff --git a/deploy/static/provider/baremetal/service-nodeport.yaml b/deploy/static/provider/baremetal/service-nodeport.yaml new file mode 100644 index 0000000000..24e302818b --- /dev/null +++ b/deploy/static/provider/baremetal/service-nodeport.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- + diff --git a/deploy/static/provider/cloud-generic.yaml b/deploy/static/provider/cloud-generic.yaml new file mode 100644 index 0000000000..8bbac569bf --- /dev/null +++ b/deploy/static/provider/cloud-generic.yaml @@ -0,0 +1,24 @@ +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + externalTrafficPolicy: Local + type: LoadBalancer + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: https + +--- + diff --git a/deploy/rbac.yaml b/deploy/static/rbac.yaml similarity index 81% rename from deploy/rbac.yaml rename to deploy/static/rbac.yaml index 301853216b..103bd98cc0 100644 --- a/deploy/rbac.yaml +++ b/deploy/static/rbac.yaml @@ -3,13 +3,18 @@ kind: ServiceAccount metadata: name: nginx-ingress-serviceaccount namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx --- - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: nginx-ingress-clusterrole + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx rules: - apiGroups: - "" @@ -47,10 +52,10 @@ rules: - apiGroups: - "" resources: - - events + - events verbs: - - create - - patch + - create + - patch - apiGroups: - "extensions" resources: @@ -59,12 +64,14 @@ rules: - update --- - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: nginx-ingress-role namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx rules: - apiGroups: - "" @@ -102,12 +109,14 @@ rules: - get --- - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: name: nginx-ingress-role-nisa-binding namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -118,11 +127,13 @@ subjects: namespace: ingress-nginx --- - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: nginx-ingress-clusterrole-nisa-binding + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -131,3 +142,6 @@ subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount namespace: ingress-nginx + +--- + diff --git 
a/deploy/static/with-rbac.yaml b/deploy/static/with-rbac.yaml new file mode 100644 index 0000000000..3e3661cdba --- /dev/null +++ b/deploy/static/with-rbac.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress-controller + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: nginx-ingress-serviceaccount + containers: + - name: nginx-ingress-controller + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --publish-service=$(POD_NAMESPACE)/ingress-nginx + - --annotations-prefix=nginx.ingress.kubernetes.io + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + +--- + diff --git a/deploy/tcp-services-configmap.yaml b/deploy/tcp-services-configmap.yaml deleted file mode 100644 index a963085d3e..0000000000 --- a/deploy/tcp-services-configmap.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: tcp-services - namespace: ingress-nginx diff --git a/deploy/udp-services-configmap.yaml b/deploy/udp-services-configmap.yaml deleted file mode 100644 index 1870931a20..0000000000 --- a/deploy/udp-services-configmap.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: udp-services - namespace: ingress-nginx diff --git a/deploy/validating-webhook.yaml.tpl b/deploy/validating-webhook.yaml.tpl new file mode 100644 index 0000000000..7aad64f8c9 --- /dev/null +++ b/deploy/validating-webhook.yaml.tpl @@ -0,0 +1,25 @@ +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: check-ingress +webhooks: +- name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - extensions + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + clientConfig: + service: + namespace: ingress-nginx + name: nginx-ingress-webhook + path: /extensions/v1beta1/ingresses + caBundle: +--- \ No newline at end of file diff --git a/deploy/with-rbac.yaml b/deploy/with-rbac.yaml deleted file mode 100644 index 98836148fd..0000000000 --- a/deploy/with-rbac.yaml +++ /dev/null @@ -1,62 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx 
-spec: - replicas: 1 - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - serviceAccountName: nginx-ingress-serviceaccount - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --annotations-prefix=nginx.ingress.kubernetes.io - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 diff --git a/deploy/with-validating-webhook.yaml.tpl b/deploy/with-validating-webhook.yaml.tpl new file mode 100644 index 0000000000..15cf37c850 --- /dev/null +++ b/deploy/with-validating-webhook.yaml.tpl @@ -0,0 +1,115 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-validation-webhook + namespace: ingress-nginx +spec: + ports: + - name: admission + port: 443 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: ingress-nginx +--- +apiVersion: v1 +data: + key.pem: + certificate.pem: +kind: Secret +metadata: + name: nginx-ingress-webhook-certificate + namespace: ingress-nginx +type: Opaque +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress-controller + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: nginx-ingress-serviceaccount + containers: + - name: nginx-ingress-controller + image: containers.schibsted.io/thibault-jamet/ingress-nginx:0.23.0-schibsted + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --publish-service=$(POD_NAMESPACE)/ingress-nginx + - --annotations-prefix=nginx.ingress.kubernetes.io + - --validating-webhook=:8080 + - --validating-webhook-certificate=/usr/local/certificates/certificate.pem + - --validating-webhook-key=/usr/local/certificates/key.pem + volumeMounts: + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: 
+ - name: http + containerPort: 80 + - name: https + containerPort: 443 + - name: webhook + containerPort: 8080 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + volumes: + - name: webhook-cert + secret: + secretName: nginx-ingress-webhook-certificate +--- diff --git a/deploy/without-rbac.yaml b/deploy/without-rbac.yaml deleted file mode 100644 index 4d1d412093..0000000000 --- a/deploy/without-rbac.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx -spec: - replicas: 1 - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --annotations-prefix=nginx.ingress.kubernetes.io - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 diff --git a/docs/annotations.md b/docs/annotations.md deleted file mode 100644 index 480dd6ef46..0000000000 --- a/docs/annotations.md +++ /dev/null @@ -1,91 +0,0 @@ -# Ingress Annotations - -This file defines a list of annotations which are supported by various Ingress controllers (both those based on the common ingress code, and alternative implementations). -The intention is to ensure the maximum amount of compatibility between different implementations. - -All annotations are assumed to be prefixed with `nginx.ingress.kubernetes.io/` except where otherwise specified. -There is no attempt to record implementation-specific annotations using other prefixes. -(Traefik in particular defines several of its own annotations which are not described here, and does not seem to support any of the standard annotations.) 
- -Key: - -* `nginx`: the `kubernetes/ingress` nginx controller -* `gce`: the `kubernetes/ingress` GCE controller -* `traefik`: Traefik's built-in Ingress controller -* `voyager`: [Voyager by AppsCode](https://github.com/appscode/voyager) - Secure HAProxy based Ingress Controller for Kubernetes -* `haproxy`: Joao Morais' [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress) -* `trafficserver`: Torchbox's [Apache Traffic Server controller plugin](https://github.com/torchbox/k8s-ts-ingress) - -## TLS-related - -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `ssl-passthrough` | Pass TLS connections directly to backend; do not offload. | `false` | nginx, voyager, haproxy -| `ssl-redirect` | Redirect non-TLS requests to TLS when TLS is enabled. | `true` | nginx, voyager, haproxy, trafficserver -| `force-ssl-redirect` | Redirect non-TLS requests to TLS even when TLS is not configured. | `false` | nginx, voyager, trafficserver -| `secure-backends` | Use TLS to communicate with origin (pods). | `false` | nginx, voyager, haproxy, trafficserver -| `kubernetes.io/ingress.allow-http` | Whether to accept non-TLS HTTP connections. | `true` | gce -| `pre-shared-cert` | Name of the TLS certificate in GCP to use when provisioning the HTTPS load balancer. | empty string | gce -| `hsts-max-age` | Set an HSTS header with this lifetime. | | voyager, trafficserver -| `hsts-include-subdomains` | Add includeSubdomains to the HSTS header. | | voyager, trafficserver - -## Authentication related - -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `auth-type` | Authentication type: `basic`, `digest`, ... | | nginx, voyager, haproxy, trafficserver -| `auth-secret` | Secret name for authentication. | | nginx, voyager, haproxy, trafficserver -| `auth-realm` | Authentication realm. | | nginx, voyager, haproxy, trafficserver -| `auth-tls-secret` | Name of secret for TLS client certification validation. | | nginx, voyager, haproxy -| `auth-tls-verify-depth` | Maximum chain length of TLS client certificate. | | nginx -| `auth-tls-error-page` | The page that user should be redirected in case of Auth error | | nginx, voyager -| `auth-satisfy` | Behaviour when more than one of `auth-type`, `auth-tls-secret` or `whitelist-source-range` are configured: `all` or `any`. | `all` | trafficserver | `trafficserver` -| `whitelist-source-range` | Comma-separate list of IP addresses to enable access to. | | nginx, voyager, haproxy, trafficserver - -## URL related - -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `app-root` | Redirect requests without a path (i.e., for `/`) to this location. | | nginx, haproxy, trafficserver -| `rewrite-target` | Replace matched Ingress `path` with this value. | | nginx, trafficserver -| `add-base-url` | Add `` tag to HTML. | | nginx -| `base-url-scheme` | Specify the scheme of the `` tags. | | nginx -| `preserve-host` | Whether to pass the client request host (`true`) or the origin hostname (`false`) in the HTTP Host field. | | trafficserver - -## CORS Related -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `enable-cors` | Enable CORS headers in response. 
| false | nginx, voyager -| `cors-allow-origin` | Specifies the Origin allowed in CORS (Access-Control-Allow-Origin) | * | nginx -| `cors-allow-headers` | Specifies the Headers allowed in CORS (Access-Control-Allow-Headers) | DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization | nginx -| `cors-allow-methods` | Specifies the Methods allowed in CORS (Access-Control-Allow-Methods) | GET, PUT, POST, DELETE, PATCH, OPTIONS | nginx -| `cors-allow-credentials` | Specifies the Access-Control-Allow-Credentials | true | nginx - -## Miscellaneous - -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `configuration-snippet` | Arbitrary text to put in the generated configuration file. | | nginx -| `limit-connections` | Limit concurrent connections per IP address[1]. | | nginx, voyager -| `limit-rps` | Limit requests per second per IP address[1]. | | nginx, voyager -| `limit-rpm` | Limit requests per minute per IP address. | | nginx, voyager -| `affinity` | Specify a method to stick clients to origins across requests. Found in `nginx`, where the only supported value is `cookie`. | | nginx, voyager -| `session-cookie-name` | When `affinity` is set to `cookie`, the name of the cookie to use. | | nginx, voyager -| `session-cookie-hash` | When `affinity` is set to `cookie`, the hash algorithm used: `md5`, `sha`, `index`. | | nginx -| `proxy-body-size` | Maximum request body size. | | nginx, voyager, haproxy -| `proxy-pass-params` | Parameters for proxy-pass directives. | | -| `follow-redirects` | Follow HTTP redirects in the response and deliver the redirect target to the client. | | trafficserver -| `kubernetes.io/ingress.global-static-ip-name` | Name of the static global IP address in GCP to use when provisioning the HTTPS load balancer. | empty string | gce - -[1] The documentation for the `nginx` controller says that only one of `limit-connections` or `limit-rps` may be specified; it's not clear why this is. - -## Caching - -| Name | Meaning | Default | Controller -| --- | --- | --- | --- | -| `cache-enable` | Cache responses according to Expires or Cache-Control headers. | | trafficserver -| `cache-generation` | An arbitrary numeric value included in the cache key; changing this effectively clears the cache for this ingress. | | trafficserver -| `cache-ignore-query-params` | Space-separate list of globs matching URL parameters to ignore when doing cache lookups. | | trafficserver -| `cache-whitelist-query-params` | Ignore any URL parameters not in this whitespace-separate list of globs. | | trafficserver -| `cache-sort-query-params` | Lexically sort the query parameters by name before cache lookup. | | trafficserver -| `cache-ignore-cookies` | Requests containing a `Cookie:` header will not use the cache unless all the cookie names match this whitespace-separate list of globs. | | trafficserver diff --git a/docs/catalog.md b/docs/catalog.md deleted file mode 100644 index 1fa1d6f1d2..0000000000 --- a/docs/catalog.md +++ /dev/null @@ -1,13 +0,0 @@ -# Ingress controller Catalog - -This is a non-comprehensive list of existing ingress controllers. 
- -* [Dummy controller backend](/examples/custom-controller) -* [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress) -* [Linkerd](https://linkerd.io/config/0.9.1/linkerd/index.html#ingress-identifier) -* [traefik](https://docs.traefik.io/configuration/backends/kubernetes/) -* [AWS Application Load Balancer Ingress Controller](https://github.com/coreos/alb-ingress-controller) -* [kube-ingress-aws-controller](https://github.com/zalando-incubator/kube-ingress-aws-controller) -* [Voyager: HAProxy Ingress Controller](https://github.com/appscode/voyager) -* [External Nginx Ingress Controller](https://github.com/unibet/ext_nginx) -* [Heptio Contour controller](https://github.com/heptio/contour) diff --git a/docs/deploy/baremetal.md b/docs/deploy/baremetal.md new file mode 100644 index 0000000000..337fadfeb7 --- /dev/null +++ b/docs/deploy/baremetal.md @@ -0,0 +1,442 @@ +# Bare-metal considerations + +In traditional *cloud* environments, where network load balancers are available on-demand, a single Kubernetes manifest +suffices to provide a single point of contact to the NGINX Ingress controller to external clients and, indirectly, to +any application running inside the cluster. *Bare-metal* environments lack this commodity, requiring a slightly +different setup to offer the same kind of access to external consumers. + +![Cloud environment](../images/baremetal/cloud_overview.jpg) +![Bare-metal environment](../images/baremetal/baremetal_overview.jpg) + +The rest of this document describes a few recommended approaches to deploying the NGINX Ingress controller inside a +Kubernetes cluster running on bare-metal. + +## A pure software solution: MetalLB + +[MetalLB][metallb] provides a network load-balancer implementation for Kubernetes clusters that do not run on a +supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. + +This section demonstrates how to use the [Layer 2 configuration mode][metallb-l2] of MetalLB together with the NGINX +Ingress controller in a Kubernetes cluster that has **publicly accessible nodes**. In this mode, one node attracts all +the traffic for the `ingress-nginx` Service IP. See [Traffic policies][metallb-trafficpolicies] for more details. + +![MetalLB in L2 mode](../images/baremetal/metallb.jpg) + +!!! note + The description of other supported configuration modes is off-scope for this document. + +!!! warning + MetalLB is currently in *beta*. Read about the [Project maturity][metallb-maturity] and make sure you inform + yourself by reading the official documentation thoroughly. + +MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB +was deployed following the [Installation][metallb-install] instructions. + +MetalLB requires a pool of IP addresses in order to be able to take ownership of the `ingress-nginx` Service. This pool +can be defined in a ConfigMap named `config` located in the same namespace as the MetalLB controller. In the simplest +possible scenario, the pool is composed of the IP addresses of Kubernetes nodes, but IP addresses can also be handed out +by a DHCP server. + +!!! 
example
+    Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal
+    environments this value is <None>)
+
+    ```console
+    $ kubectl get node
+    NAME     STATUS   ROLES    EXTERNAL-IP
+    host-1   Ready    master   203.0.113.1
+    host-2   Ready    node     203.0.113.2
+    host-3   Ready    node     203.0.113.3
+    ```
+
+    After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates
+    the *loadBalancer* IP field of the `ingress-nginx` Service accordingly.
+
+    ```yaml
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      namespace: metallb-system
+      name: config
+    data:
+      config: |
+        address-pools:
+        - name: default
+          protocol: layer2
+          addresses:
+          - 203.0.113.2-203.0.113.3
+    ```
+
+    ```console
+    $ kubectl -n ingress-nginx get svc
+    NAME                   TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)
+    default-http-backend   ClusterIP      10.0.64.249    <none>        80/TCP
+    ingress-nginx          LoadBalancer   10.0.220.217   203.0.113.3   80:30100/TCP,443:30101/TCP
+    ```
+
+As soon as MetalLB sets the external IP address of the `ingress-nginx` LoadBalancer Service, the corresponding entries
+are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on
+the ports configured in the LoadBalancer Service:
+
+```console
+$ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com'
+HTTP/1.1 200 OK
+Server: nginx/1.15.2
+```
+
+!!! tip
+    In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the `Local`
+    traffic policy. Traffic policies are described in more detail in [Traffic policies][metallb-trafficpolicies] as
+    well as in the next section.
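+If you prefer to set the policy directly, a one-line patch on the `ingress-nginx` Service does it. This is a sketch,
+assuming the Service created by the standard manifests referenced in this page:
+
+```console
+$ kubectl -n ingress-nginx patch svc ingress-nginx \
+    -p '{"spec":{"externalTrafficPolicy":"Local"}}'
+```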
+[metallb]: https://metallb.universe.tf/
+[metallb-maturity]: https://metallb.universe.tf/concepts/maturity/
+[metallb-l2]: https://metallb.universe.tf/tutorial/layer2/
+[metallb-install]: https://metallb.universe.tf/installation/
+[metallb-trafficpolicies]: https://metallb.universe.tf/usage/#traffic-policies
+
+## Over a NodePort Service
+
+Due to its simplicity, this is the setup a user will deploy by default when following the steps described in the
+[installation guide][install-baremetal].
+
+!!! info
+    A Service of type `NodePort` exposes, via the `kube-proxy` component, the **same unprivileged** port (default:
+    30000-32767) on every Kubernetes node, masters included. For more information, see [Services][nodeport-def].
+
+In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to
+any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client
+located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports
+80 and 443. Instead, the external client must append the NodePort allocated to the `ingress-nginx` Service to HTTP
+requests.
+
+![NodePort request flow](../images/baremetal/nodeport.jpg)
+
+!!! example
+    Given the NodePort `30100` allocated to the `ingress-nginx` Service
+
+    ```console
+    $ kubectl -n ingress-nginx get svc
+    NAME                   TYPE        CLUSTER-IP     PORT(S)
+    default-http-backend   ClusterIP   10.0.64.249    80/TCP
+    ingress-nginx          NodePort    10.0.220.217   80:30100/TCP,443:30101/TCP
+    ```
+
+    and a Kubernetes node with the public IP address `203.0.113.2` (the external IP is added as an example, in most
+    bare-metal environments this value is <None>)
+
+    ```console
+    $ kubectl get node
+    NAME     STATUS   ROLES    EXTERNAL-IP
+    host-1   Ready    master   203.0.113.1
+    host-2   Ready    node     203.0.113.2
+    host-3   Ready    node     203.0.113.3
+    ```
+
+    a client would reach an Ingress with `host: myapp.example.com` at `http://myapp.example.com:30100`, where the
+    myapp.example.com subdomain resolves to the 203.0.113.2 IP address.
+
+!!! danger "Impact on the host system"
+    While it may sound tempting to reconfigure the NodePort range using the `--service-node-port-range` API server flag
+    to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues
+    including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant
+    `kube-proxy` privileges it may otherwise not require.
+
+    This practice is therefore **discouraged**. See the other approaches proposed in this page for alternatives.
+
+This approach has a few other limitations one ought to be aware of:
+
+* **Source IP address**
+
+Services of type NodePort perform [source address translation][nodeport-nat] by default. This means the source IP of an
+HTTP request is always **the IP address of the Kubernetes node that received the request** from the perspective of
+NGINX.
+
+The recommended way to preserve the source IP in a NodePort setup is to set the value of the `externalTrafficPolicy`
+field of the `ingress-nginx` Service spec to `Local` ([example][preserve-ip]).
+
+!!! warning
+    This setting effectively **drops packets** sent to Kubernetes nodes which are not running any instance of the NGINX
+    Ingress controller. Consider [assigning NGINX Pods to specific nodes][pod-assign] in order to control on what nodes
+    the NGINX Ingress controller should be scheduled or not scheduled.
+
+!!! example
+    In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments
+    this value is <None>)
+
+    ```console
+    $ kubectl get node
+    NAME     STATUS   ROLES    EXTERNAL-IP
+    host-1   Ready    master   203.0.113.1
+    host-2   Ready    node     203.0.113.2
+    host-3   Ready    node     203.0.113.3
+    ```
+
+    with a `nginx-ingress-controller` Deployment composed of 2 replicas
+
+    ```console
+    $ kubectl -n ingress-nginx get pod -o wide
+    NAME                                       READY   STATUS    IP           NODE
+    default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1   host-2
+    nginx-ingress-controller-cf9ff8c96-8vvf8   1/1     Running   172.17.0.3   host-3
+    nginx-ingress-controller-cf9ff8c96-pxsds   1/1     Running   172.17.1.4   host-2
+    ```
+
+    Requests sent to `host-2` and `host-3` would be forwarded to NGINX and the original client's IP would be preserved,
+    while requests to `host-1` would get dropped because there is no NGINX replica running on that node.
+
+* **Ingress status**
+
+Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller **does not
+update the status of Ingress objects it manages**.
+
+```console
+$ kubectl get ingress
+NAME           HOSTS               ADDRESS   PORTS
+test-ingress   myapp.example.com             80
+```
+
+Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible
+to force the status update of all managed Ingress objects by setting the `externalIPs` field of the `ingress-nginx`
+Service.
+
+!!! warning
+    There is more to setting `externalIPs` than just enabling the NGINX Ingress controller to update the status of
+    Ingress objects. Please read about this option in the [Services][external-ips] page of official Kubernetes
+    documentation as well as the section about [External IPs](#external-ips) in this document for more information.
+
+!!! example
+    Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal
+    environments this value is <None>)
+
+    ```console
+    $ kubectl get node
+    NAME     STATUS   ROLES    EXTERNAL-IP
+    host-1   Ready    master   203.0.113.1
+    host-2   Ready    node     203.0.113.2
+    host-3   Ready    node     203.0.113.3
+    ```
+
+    one could edit the `ingress-nginx` Service and add the following field to the object spec
+
+    ```yaml
+    spec:
+      externalIPs:
+      - 203.0.113.1
+      - 203.0.113.2
+      - 203.0.113.3
+    ```
+
+    which would in turn be reflected on Ingress objects as follows:
+
+    ```console
+    $ kubectl get ingress -o wide
+    NAME           HOSTS               ADDRESS                               PORTS
+    test-ingress   myapp.example.com   203.0.113.1,203.0.113.2,203.0.113.3   80
+    ```
+
+* **Redirects**
+
+As NGINX is **not aware of the port translation operated by the NodePort Service**, backend applications are responsible
+for generating redirect URLs that take into account the URL used by external clients, including the NodePort.
+
+!!! example
+    Redirects generated by NGINX, for instance HTTP to HTTPS or `domain` to `www.domain`, are generated without
+    NodePort:
+
+    ```console
+    $ curl -D- http://myapp.example.com:30100
+    HTTP/1.1 308 Permanent Redirect
+    Server: nginx/1.15.2
+    Location: https://myapp.example.com/  #-> missing NodePort in HTTPS redirect
+    ```
+
+[install-baremetal]: ./index.md#bare-metal
+[nodeport-def]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport
+[nodeport-nat]: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-type-nodeport
+[pod-assign]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+[preserve-ip]: https://github.com/kubernetes/ingress-nginx/blob/nginx-0.19.0/deploy/provider/aws/service-nlb.yaml#L12-L14
+
+## Via the host network
+
+In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure
+`ingress-nginx` Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of
+this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network
+interfaces, without the extra network translation imposed by NodePort Services.
+
+!!! note
+    This approach does not leverage any Service object to expose the NGINX Ingress controller. If the `ingress-nginx`
+    Service exists in the target cluster, it is **recommended to delete it**.
+
+This can be achieved by enabling the `hostNetwork` option in the Pods' spec.
+
+```yaml
+template:
+  spec:
+    hostNetwork: true
+```
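+If NGINX is also expected to resolve cluster-internal names in this mode, pair `hostNetwork` with the `dnsPolicy`
+setting discussed under *DNS resolution* below. A minimal sketch of the two Pod template fields, shown together for
+convenience (both are standard Kubernetes fields):
+
+```yaml
+template:
+  spec:
+    hostNetwork: true
+    # keep resolving names through kube-dns/CoreDNS despite hostNetwork
+    dnsPolicy: ClusterFirstWithHostNet
+```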
+
+!!! danger "Security considerations"
+    Enabling this option **exposes every system daemon to the NGINX Ingress controller** on any network interface,
+    including the host's loopback. Please evaluate the impact this may have on the security of your system carefully.
+
+!!! example
+    Consider this `nginx-ingress-controller` Deployment composed of 2 replicas: NGINX Pods inherit the IP address of
+    their host instead of an internal Pod IP.
+
+    ```console
+    $ kubectl -n ingress-nginx get pod -o wide
+    NAME                                       READY   STATUS    IP            NODE
+    default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1    host-2
+    nginx-ingress-controller-5b4cf5fc6-7lg6c   1/1     Running   203.0.113.3   host-3
+    nginx-ingress-controller-5b4cf5fc6-lzrls   1/1     Running   203.0.113.2   host-2
+    ```
+
+One major limitation of this deployment approach is that only **a single NGINX Ingress controller Pod** may be scheduled
+on each cluster node, because binding the same port multiple times on the same network interface is technically
+impossible. Pods that are unschedulable for this reason fail with the following event:
+
+```console
+$ kubectl -n ingress-nginx describe pod <unschedulable-nginx-ingress-controller-pod>
+...
+Events:
+  Type     Reason            From               Message
+  ----     ------            ----               -------
+  Warning  FailedScheduling  default-scheduler  0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports.
+```
+
+One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a *DaemonSet* instead
+of a traditional Deployment.
+
+!!! info
+    A DaemonSet schedules exactly one type of Pod per cluster node, masters included, unless a node is configured to
+    [repel those Pods][taints]. For more information, see [DaemonSet][daemonset].
+
+Because most properties of DaemonSet objects are identical to Deployment objects, this documentation page leaves the
+configuration of the corresponding manifest at the user's discretion.
+
+![DaemonSet with hostNetwork flow](../images/baremetal/hostnetwork.jpg)
+
+Like with NodePorts, this approach has a few quirks it is important to be aware of.
+
+* **DNS resolution**
+
+Pods configured with `hostNetwork: true` do not use the internal DNS resolver (i.e. *kube-dns* or *CoreDNS*), unless
+their `dnsPolicy` spec field is set to [`ClusterFirstWithHostNet`][dnspolicy]. Consider using this setting if NGINX is
+expected to resolve internal names for any reason.
+
+* **Ingress status**
+
+Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default
+`--publish-service` flag used in standard cloud setups **does not apply** and the status of all Ingress objects remains
+blank.
+
+```console
+$ kubectl get ingress
+NAME           HOSTS               ADDRESS   PORTS
+test-ingress   myapp.example.com             80
+```
+
+Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the
+[`--report-node-internal-ip-address`][cli-args] flag, which sets the status of all Ingress objects to the internal IP
+address of all nodes running the NGINX Ingress controller.
+
+!!! example
+    Given a `nginx-ingress-controller` DaemonSet composed of 2 replicas
+
+    ```console
+    $ kubectl -n ingress-nginx get pod -o wide
+    NAME                                       READY   STATUS    IP            NODE
+    default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1    host-2
+    nginx-ingress-controller-5b4cf5fc6-7lg6c   1/1     Running   203.0.113.3   host-3
+    nginx-ingress-controller-5b4cf5fc6-lzrls   1/1     Running   203.0.113.2   host-2
+    ```
+
+    the controller sets the status of all Ingress objects it manages to the following value:
+
+    ```console
+    $ kubectl get ingress -o wide
+    NAME           HOSTS               ADDRESS                   PORTS
+    test-ingress   myapp.example.com   203.0.113.2,203.0.113.3   80
+    ```
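+In manifest terms, the flag goes in the controller container's arguments, next to the flags already used by the
+standard manifests. A sketch, not a complete Deployment/DaemonSet:
+
+```yaml
+containers:
+  - name: nginx-ingress-controller
+    args:
+      - /nginx-ingress-controller
+      - --configmap=$(POD_NAMESPACE)/nginx-configuration
+      # publish the node's internal IP on managed Ingress objects
+      - --report-node-internal-ip-address
+```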
+!!! note
+    Alternatively, it is possible to override the address written to Ingress objects using the
+    `--publish-status-address` flag. See [Command line arguments][cli-args].
+
+[taints]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+[daemonset]: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+[dnspolicy]: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+[cli-args]: ../../user-guide/cli-arguments/
+
+## Using a self-provisioned edge
+
+Similarly to cloud environments, this deployment approach requires an edge network component providing a public
+entrypoint to the Kubernetes cluster. This edge component can be either hardware (e.g. vendor appliance) or software
+(e.g. _HAProxy_) and is usually managed outside of the Kubernetes landscape by operations teams.
+
+Such a deployment builds upon the NodePort Service described above in [Over a NodePort Service](#over-a-nodeport-service),
+with one significant difference: external clients do not access cluster nodes directly, only the edge component does.
+This is particularly suitable for private Kubernetes clusters where none of the nodes has a public IP address.
+
+On the edge side, the only prerequisite is to dedicate a public IP address that forwards all HTTP traffic to Kubernetes
+nodes and/or masters. Incoming traffic on TCP ports 80 and 443 is forwarded to the corresponding HTTP and HTTPS NodePort
+on the target nodes as shown in the diagram below:
+
+![User edge](../images/baremetal/user_edge.jpg)
+
+## External IPs
+
+!!! danger "Source IP address"
+    This method does not allow preserving the source IP of HTTP requests in any manner; it is therefore **not
+    recommended** despite its apparent simplicity.
+
+The `externalIPs` Service option was previously mentioned in the [NodePort](#over-a-nodeport-service) section.
+
+As per the [Services][external-ips] page of the official Kubernetes documentation, the `externalIPs` option causes
+`kube-proxy` to route traffic sent to arbitrary IP addresses **and on the Service ports** to the endpoints of that
+Service. These IP addresses **must belong to the target node**.
+
+!!! example
+    Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal
+    environments this value is <None>)
+
+    ```console
+    $ kubectl get node
+    NAME     STATUS   ROLES    EXTERNAL-IP
+    host-1   Ready    master   203.0.113.1
+    host-2   Ready    node     203.0.113.2
+    host-3   Ready    node     203.0.113.3
+    ```
+
+    and the following `ingress-nginx` NodePort Service
+
+    ```console
+    $ kubectl -n ingress-nginx get svc
+    NAME            TYPE       CLUSTER-IP     PORT(S)
+    ingress-nginx   NodePort   10.0.220.217   80:30100/TCP,443:30101/TCP
+    ```
+
+    One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort
+    and the Service port:
+
+    ```yaml
+    spec:
+      externalIPs:
+      - 203.0.113.2
+      - 203.0.113.3
+    ```
+
+    ```console
+    $ curl -D- http://myapp.example.com:30100
+    HTTP/1.1 200 OK
+    Server: nginx/1.15.2
+
+    $ curl -D- http://myapp.example.com
+    HTTP/1.1 200 OK
+    Server: nginx/1.15.2
+    ```
+
+    We assume the myapp.example.com subdomain above resolves to both 203.0.113.2 and 203.0.113.3 IP addresses.
+
+[external-ips]: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
diff --git a/docs/deploy/index.md b/docs/deploy/index.md
new file mode 100644
index 0000000000..5ac0ca195d
--- /dev/null
+++ b/docs/deploy/index.md
@@ -0,0 +1,207 @@
+# Installation Guide
+
+## Contents
+
+- [Prerequisite Generic Deployment Command](#prerequisite-generic-deployment-command)
+  - [Provider Specific Steps](#provider-specific-steps)
+    - [Docker for Mac](#docker-for-mac)
+    - [minikube](#minikube)
+    - [AWS](#aws)
+    - [GCE - GKE](#gce-gke)
+    - [Azure](#azure)
+    - [Bare-metal](#bare-metal)
+  - [Verify installation](#verify-installation)
+  - [Detect installed version](#detect-installed-version)
+- [Using Helm](#using-helm)
+
+## Prerequisite Generic Deployment Command
+
+!!! attention
+    The default configuration watches Ingress objects from *all namespaces*.
+    To change this behavior, use the flag `--watch-namespace` to limit the scope to a particular namespace.
+
+!!! warning
+    If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions.
+
+!!! attention
+    If you're using GKE you need to initialize your user as a cluster-admin with the following command:
+    ```console
+    kubectl create clusterrolebinding cluster-admin-binding \
+      --clusterrole cluster-admin \
+      --user $(gcloud config get-value account)
+    ```
+
+The following **Mandatory Command** is required for all deployments.
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
+```
+
+### Provider Specific Steps
+
+There are cloud-provider-specific YAML files.
+
+#### Docker for Mac
+
+Kubernetes is available in Docker for Mac (from [version 18.06.0-ce](https://docs.docker.com/docker-for-mac/release-notes/#stable-releases-of-2018)).
+
+[enable]: https://docs.docker.com/docker-for-mac/#kubernetes
+
+Create a service:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
+```
+
+#### minikube
+
+For standard usage:
+
+```console
+minikube addons enable ingress
+```
+
+For development:
+
+1. Disable the ingress addon:
+
+```console
+minikube addons disable ingress
+```
+
+2. Execute `make dev-env`
+3. Confirm the `nginx-ingress-controller` deployment exists:
+
+```console
+$ kubectl get pods -n ingress-nginx
+NAME                                       READY   STATUS    RESTARTS   AGE
+default-http-backend-66b447d9cf-rrlf9      1/1     Running   0          12s
+nginx-ingress-controller-fdcdcd6dd-vvpgs   1/1     Running   0          11s
+```
+
+#### AWS
+
+In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`.
+Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB).
+Please check the [elastic load balancing AWS details page](https://aws.amazon.com/elasticloadbalancing/details/).
+
+##### Elastic Load Balancer - ELB
+
+This setup requires choosing in which layer (L4 or L7) we want to configure the ELB:
+
+- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443.
+- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB.
+
+For L4:
+
+Check that no change is necessary with regard to the ELB idle timeout.
+In some scenarios, users may want to modify the ELB idle timeout, so please check the [ELB Idle Timeouts section](#elb-idle-timeouts)
+for additional information. If a change is required, users will need to update the value of
+`service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` in `provider/aws/service-l4.yaml`.
+
+Then execute:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-l4.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/patch-configmap-l4.yaml
+```
+
+For L7:
+
+Edit the file `provider/aws/service-l7.yaml`, replacing the dummy certificate id `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"` with a valid one.
+
+Check that no change is necessary with regard to the ELB idle timeout. In some scenarios, users may want to modify the
+ELB idle timeout, so please check the [ELB Idle Timeouts section](#elb-idle-timeouts) for additional information. If a
+change is required, users will need to update the value of `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout`
+in `provider/aws/service-l7.yaml`.
+
+Then execute:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-l7.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/patch-configmap-l7.yaml
+```
+
+This example creates an ELB with just two listeners, one on port 80 and another on port 443.
+
+![Listeners](../images/elb-l7-listener.png)
+
+##### ELB Idle Timeouts
+
+In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is
+less than the [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) that is
+configured for NGINX. By default, NGINX `keepalive_timeout` is set to `75s`.
+
+The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout)
+has been modified, in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be
+modified to ensure it is less than the `keepalive_timeout` the user has configured.
+
+_Please note: an idle timeout of `3600s` is recommended when using WebSockets._
+
+More information with regard to idle timeouts for your load balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
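+In manifest form, this is the annotation already carried by the AWS Service examples above, shown here with the
+WebSocket-friendly value:
+
+```yaml
+metadata:
+  annotations:
+    # recommended when using WebSockets, per the note above
+    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
+```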
+
+##### Network Load Balancer (NLB)
+
+This type of load balancer is supported since v1.10.0 as an ALPHA feature.
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-nlb.yaml
+```
+
+#### GCE-GKE
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
+```
+
+**Important Note:** proxy protocol is not supported in GCE/GKE.
+
+#### Azure
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
+```
+
+#### Bare-metal
+
+Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport):
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml
+```
+
+!!! tip
+    For extended notes regarding deployments on bare-metal, see [Bare-metal considerations](./baremetal.md).
+
+### Verify installation
+
+To check if the ingress controller pods have started, run the following command:
+
+```console
+kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch
+```
+
+Once the ingress controller pods are running, you can cancel the above command by typing `Ctrl+C`.
+Now, you are ready to create your first ingress.
+
+### Detect installed version
+
+To detect which version of the ingress controller is running, exec into the pod and run the `nginx-ingress-controller --version` command.
+
+```console
+POD_NAMESPACE=ingress-nginx
+POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
+
+kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version
+```
+
+## Using Helm
+
+The NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [stable/nginx-ingress](https://github.com/kubernetes/charts/tree/master/stable/nginx-ingress) from the official charts repository.
+To install the chart with the release name `my-nginx`:
+
+```console
+helm install stable/nginx-ingress --name my-nginx
+```
+
+If the Kubernetes cluster has RBAC enabled, then run:
+
+```console
+helm install stable/nginx-ingress --name my-nginx --set rbac.create=true
+```
+
+Detect installed version:
+
+```console
+POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
+kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version
+```
diff --git a/deploy/rbac.md b/docs/deploy/rbac.md
similarity index 97%
rename from deploy/rbac.md
rename to docs/deploy/rbac.md
index 718269f363..08c1a02914 100644
--- a/deploy/rbac.md
+++ b/docs/deploy/rbac.md
@@ -1,4 +1,4 @@
-# Role Based Access Control - RBAC
+# Role Based Access Control (RBAC)
 
 ## Overview
 
@@ -43,7 +43,7 @@ These permissions are granted specific to the nginx-ingress namespace.
 These permissions are granted to the Role named `nginx-ingress-role`
 
 * `configmaps`, `pods`, `secrets`: get
-* `endpoints`: create, get, update
+* `endpoints`: get
 
 Furthermore to support leader-election, the nginx-ingress-controller needs to have access to a `configmap` using the resourceName `ingress-controller-leader-nginx`
diff --git a/docs/deploy/upgrade.md b/docs/deploy/upgrade.md
new file mode 100644
index 0000000000..8ca9064126
--- /dev/null
+++ b/docs/deploy/upgrade.md
@@ -0,0 +1,48 @@
+# Upgrading
+
+!!! important
+    No matter the method you use for upgrading, _if you use template overrides,
+    make sure your templates are compatible with the new version of ingress-nginx_.
+
+## Without Helm
+
+To upgrade your ingress-nginx installation, it should be enough to change the version of the image
+in the controller Deployment.
+
+I.e. if your deployment resource looks like (partial example):
+
+```yaml
+kind: Deployment
+metadata:
+  name: nginx-ingress-controller
+  namespace: ingress-nginx
+spec:
+  replicas: 1
+  selector: ...
+  template:
+    metadata: ...
+    spec:
+      containers:
+        - name: nginx-ingress-controller
+          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
+          args: ...
+```
+
+simply change the `0.9.0` tag to the version you wish to upgrade to.
+The easiest way to do this is e.g. (do note you may need to change the name parameter according to your installation):
+
+```
+kubectl set image deployment/nginx-ingress-controller \
+  nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.18.0
+```
+
+For interactive editing, use `kubectl edit deployment nginx-ingress-controller`.
+
+## With Helm
+
+If you installed ingress-nginx using the Helm command in the deployment docs so its release name is `ngx-ingress`,
+you should be able to upgrade using:
+
+```shell
+helm upgrade --reuse-values ngx-ingress stable/nginx-ingress
+```
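+Either way, once the new Pods are up you can confirm the running version with the same commands used in the
+installation guide (this assumes the controller runs in the `ingress-nginx` namespace with the standard labels):
+
+```console
+POD_NAME=$(kubectl get pods -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
+kubectl exec -it $POD_NAME -n ingress-nginx -- /nginx-ingress-controller --version
+```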
+    ```
+    #!/bin/bash
+
+    SERVICE_NAME=ingress-nginx
+    NAMESPACE=ingress-nginx
+
+    TEMP_DIRECTORY=$(mktemp -d)
+    echo "creating certs in directory ${TEMP_DIRECTORY}"
+
+    cat <<EOF >> ${TEMP_DIRECTORY}/csr.conf
+    [req]
+    req_extensions = v3_req
+    distinguished_name = req_distinguished_name
+    [req_distinguished_name]
+    [ v3_req ]
+    basicConstraints = CA:FALSE
+    keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+    extendedKeyUsage = serverAuth
+    subjectAltName = @alt_names
+    [alt_names]
+    DNS.1 = ${SERVICE_NAME}
+    DNS.2 = ${SERVICE_NAME}.${NAMESPACE}
+    DNS.3 = ${SERVICE_NAME}.${NAMESPACE}.svc
+    EOF
+
+    openssl genrsa -out ${TEMP_DIRECTORY}/server-key.pem 2048
+    openssl req -new -key ${TEMP_DIRECTORY}/server-key.pem \
+        -subj "/CN=${SERVICE_NAME}.${NAMESPACE}.svc" \
+        -out ${TEMP_DIRECTORY}/server.csr \
+        -config ${TEMP_DIRECTORY}/csr.conf
+
+    cat <<EOF | kubectl create -f -
+    apiVersion: certificates.k8s.io/v1beta1
+    kind: CertificateSigningRequest
+    metadata:
+      name: ${SERVICE_NAME}.${NAMESPACE}.svc
+    spec:
+      request: $(cat ${TEMP_DIRECTORY}/server.csr | base64 | tr -d '\n')
+      usages:
+      - digital signature
+      - key encipherment
+      - server auth
+    EOF
+
+    kubectl certificate approve ${SERVICE_NAME}.${NAMESPACE}.svc
+
+    SERVER_CERT=$(kubectl get csr ${SERVICE_NAME}.${NAMESPACE}.svc -o jsonpath='{.status.certificate}')
+    if [[ -z ${SERVER_CERT} ]]; then
+        echo "error: signed certificate not yet available on the CSR resource" >&2
+        exit 1
+    fi
+    echo ${SERVER_CERT} | openssl base64 -d -A -out ${TEMP_DIRECTORY}/server-cert.pem
+
+    kubectl create secret generic ingress-nginx.svc \
+        --from-file=key.pem=${TEMP_DIRECTORY}/server-key.pem \
+        --from-file=cert.pem=${TEMP_DIRECTORY}/server-cert.pem \
+        -n ${NAMESPACE}
+    ```
+
+#### Using Helm
+
+To generate the certificate using Helm, you can use the following snippet:
+
+!!! example
+    ```
+    {{- $cn := printf "%s.%s.svc" ( include "nginx-ingress.validatingWebhook.fullname" . ) .Release.Namespace }}
+    {{- $ca := genCA (printf "%s-ca" ( include "nginx-ingress.validatingWebhook.fullname" . )) .Values.validatingWebhook.certificateValidity -}}
+    {{- $cert := genSignedCert $cn nil nil .Values.validatingWebhook.certificateValidity $ca -}}
+    ```
+
+### Ingress controller flags
+
+To enable the feature in the ingress controller, you _need_ to provide 3 flags to the command line.
+
+|flag|description|example usage|
+|-|-|-|
+|`--validating-webhook`|The address to start an admission controller on|`:8080`|
+|`--validating-webhook-certificate`|The certificate the webhook is using for its TLS handling|`/usr/local/certificates/validating-webhook.pem`|
+|`--validating-webhook-key`|The key the webhook is using for its TLS handling|`/usr/local/certificates/validating-webhook-key.pem`|
+
+### kube API server flags
+
+The validating webhook feature requires specific setup on the kube API server side. Depending on your Kubernetes version, the required admission plugin may or may not be enabled by default.
+To check that your kube API server runs with the required flags, please refer to the [kubernetes][1] documentation.
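+
+As a quick sanity check on clusters where the control plane runs as static pods (e.g. kubeadm), you can grep the running API server's configuration for admission settings. This is a sketch: the `component=kube-apiserver` label and flag layout are assumptions that vary by distribution.
+
+```console
+kubectl -n kube-system describe pod -l component=kube-apiserver | grep -i admission
+```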
+
+### Additional kubernetes objects
+
+Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-validation-webhook
+  namespace: ingress-nginx
+spec:
+  ports:
+  - name: admission
+    port: 443
+    protocol: TCP
+    targetPort: 8080
+  selector:
+    app: nginx-ingress
+    component: controller
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+metadata:
+  name: check-ingress
+webhooks:
+- name: validate.nginx.ingress.kubernetes.io
+  rules:
+  - apiGroups:
+    - extensions
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - ingresses
+  failurePolicy: Fail
+  clientConfig:
+    service:
+      namespace: ingress-nginx
+      name: ingress-validation-webhook
+      path: /extensions/v1beta1/ingress
+    caBundle: 
+```
+
+[1]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook
\ No newline at end of file
diff --git a/docs/development.md b/docs/development.md
index 4cc91f6c0b..5c5fd2a7a6 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -1,8 +1,46 @@
-# Getting Started
+# Developing for NGINX Ingress Controller
 
 This document explains how to get started with developing for NGINX Ingress controller.
 It includes how to build, test, and release ingress controllers.
 
+## Quick Start
+
+### Getting the code
+
+The code must be checked out as a subdirectory of k8s.io, and not github.com.
+
+```
+mkdir -p $GOPATH/src/k8s.io
+cd $GOPATH/src/k8s.io
+# Replace "$YOUR_GITHUB_USERNAME" below with your github username
+git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git
+cd ingress-nginx
+```
+
+### Initial developer environment build
+
+>**Prerequisites**: Minikube must be installed.
+See [releases](https://github.com/kubernetes/minikube/releases) for installation instructions.
+
+If you are using **MacOS** and deploying to **minikube**, the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace `ingress-nginx`:
+
+```
+$ make dev-env
+```
+
+### Updating the deployment
+
+The nginx controller container image can be rebuilt using:
+```
+$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build container
+```
+
+The image will only be used by pods created after the rebuild. To delete the old pods, which will cause new ones to spin up:
+```
+$ kubectl get pods -n ingress-nginx
+$ kubectl delete pod -n ingress-nginx nginx-ingress-controller-
+```
+
 ## Dependencies
 
 The build uses dependencies in the `vendor` directory, which
@@ -81,7 +119,7 @@ $ TAG= REGISTRY=$USER/ingress-controller make docker-push
 
 ## Deploying
 
 There are several ways to deploy the ingress controller onto a cluster.
-Please check the [deployment guide](../deploy/README.md)
+Please check the [deployment guide](./deploy)
 
 ## Testing
 
@@ -99,6 +137,16 @@ $ cd $GOPATH/src/k8s.io/ingress-nginx
 $ make e2e-test
 ```
 
+To run unit-tests for lua code locally, run:
+
+```console
+$ cd $GOPATH/src/k8s.io/ingress-nginx
+$ ./rootfs/etc/nginx/lua/test/up.sh
+$ make lua-test
+```
+
+Lua tests are located in `$GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test`. When creating a new test file it must follow the naming convention `_test.lua` or it will be ignored.
+
 ## Releasing
 
 All Makefiles will produce a release binary, as shown above. To publish this
diff --git a/docs/examples/PREREQUISITES.md b/docs/examples/PREREQUISITES.md
index 94581901b5..b6afa1893d 100644
--- a/docs/examples/PREREQUISITES.md
+++ b/docs/examples/PREREQUISITES.md
@@ -8,7 +8,7 @@ Unless otherwise mentioned, the TLS secret used in examples is a 2048 bit RSA
 key/cert pair with an arbitrarily chosen hostname, created as follows
 
 ```console
-$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc"
+$ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc"
 Generating a 2048 bit RSA private key
 ................+++
 ................+++
@@ -19,110 +19,40 @@ $ kubectl create secret tls tls-secret --key tls.key --cert tls.crt
 secret "tls-secret" created
 ```
 
-## CA Authentication
+Note: If using CA Authentication, described below, you will need to sign the server certificate with the CA.
 
-You can act as your very own CA, or use an existing one. As an exercise / learning, we're going to generate our
-own CA, and also generate a client certificate.
+## Client Certificate Authentication
 
-These instructions are based on CoreOS OpenSSL [instructions](https://coreos.com/kubernetes/docs/latest/openssl.html)
+CA Authentication, also known as Mutual Authentication, allows both the server and client to verify each
+other's identity via a common CA.
 
-### Generating a CA
+We have a CA certificate, usually obtained from a Certificate Authority, and use it to sign
+both our server certificate and client certificate. Then every time we want to access our backend, we must
+pass the client certificate.
 
-First of all, you've to generate a CA. This is going to be the one who will sign your client certificates.
-In real production world, you may face CAs with intermediate certificates, as the following:
+These instructions are based on the following [blog](https://medium.com/@awkwardferny/configuring-certificate-based-mutual-authentication-with-kubernetes-ingress-nginx-20e7e38fdfca)
 
-```console
-$ openssl s_client -connect www.google.com:443
-[...]
----
-Certificate chain
- 0 s:/C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com
-   i:/C=US/O=Google Inc/CN=Google Internet Authority G2
- 1 s:/C=US/O=Google Inc/CN=Google Internet Authority G2
-   i:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA
- 2 s:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA
-   i:/C=US/O=Equifax/OU=Equifax Secure Certificate Authority
-
-```
-
-To generate our CA Certificate, we've to run the following commands:
-
-```console
-$ openssl genrsa -out ca.key 2048
-$ openssl req -x509 -new -nodes -key ca.key -days 10000 -out ca.crt -subj "/CN=example-ca"
-```
-
-This will generate two files: A private key (ca.key) and a public key (ca.crt). This CA is valid for 10000 days.
-The ca.crt can be used later in the step of creation of CA authentication secret.
-
-### Generating the client certificate
-
-The following steps generate a client certificate signed by the CA generated above. This client can be
-used to authenticate in a tls-auth configured ingress.
-
-First, we need to generate an 'openssl.cnf' file that will be used while signing the keys:
-
-```console
-[req]
-req_extensions = v3_req
-distinguished_name = req_distinguished_name
-[req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-```
-
-Then, a user generates his very own private key (that he needs to keep secret)
-and a CSR (Certificate Signing Request) that will be sent to the CA to sign and generate a certificate.
+**Generate the CA Key and Certificate:**
 
 ```console
-$ openssl genrsa -out client1.key 2048
-$ openssl req -new -key client1.key -out client1.csr -subj "/CN=client1" -config openssl.cnf
+openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 365 -nodes -subj '/CN=My Cert Authority'
 ```
 
-As the CA receives the generated 'client1.csr' file, it signs it and generates a client.crt certificate:
+**Generate the Server Key and Certificate, and Sign with the CA Certificate:**
 
 ```console
-$ openssl x509 -req -in client1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client1.crt -days 365 -extensions v3_req -extfile openssl.cnf
+openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=mydomain.com'
+openssl x509 -req -sha256 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
 ```
 
-Then, you'll have 3 files: the client.key (user's private key), client.crt (user's public key) and client.csr (disposable CSR).
-
-### Creating the CA Authentication secret
-
-If you're using the CA Authentication feature, you need to generate a secret containing
-all the authorized CAs. You must download them from your CA site in PEM format (like the following):
-
-```
------BEGIN CERTIFICATE-----
-[....]
------END CERTIFICATE-----
-```
-
-You can have as many certificates as you want. If they're in the binary DER format,
-you can convert them as the following:
+**Generate the Client Key and Certificate, and Sign with the CA Certificate:**
 
 ```console
-$ openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem
+openssl req -new -newkey rsa:4096 -keyout client.key -out client.csr -nodes -subj '/CN=My Client'
+openssl x509 -req -sha256 -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 02 -out client.crt
 ```
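+
+Optionally, verify that the generated server and client certificates chain back to the CA. This is a quick sanity check using standard OpenSSL tooling:
+
+```console
+# Both commands should report "OK" for each certificate
+openssl verify -CAfile ca.crt server.crt client.crt
+```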
-
-Then, you've to concatenate them all in only one file, named 'ca.crt' as the following:
-
-```console
-$ cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt
-```
-
-The final step is to create a secret with the content of this file. This secret is going to be used in
-the TLS Auth directive:
-
-```console
-$ kubectl create secret generic caingress --namespace=default --from-file=ca.crt=
-```
-
-Note: You can also generate the CA Authentication Secret along with the TLS Secret by using:
-```console
-$ kubectl create secret generic caingress --namespace=default --from-file=ca.crt= --from-file=tls.crt= --from-file=tls.key=
-```
+Once this is complete, you can continue to follow the instructions [here](./auth/client-certs/README.md#creating-certificate-secrets)
 
 ## Test HTTP Service
 
@@ -172,7 +102,7 @@ Events:
   1m    1m    1   {service-controller }     Normal  CreatingLoadBalancer  Creating load balancer
   16s   16s   1   {service-controller }     Normal  CreatedLoadBalancer   Created load balancer
 
-$ curl 108.59.87.126
+$ curl 108.59.87.136
 CLIENT VALUES:
 client_address=10.240.0.3
 command=GET
diff --git a/docs/examples/README.md b/docs/examples/README.md
deleted file mode 100644
index d582d11026..0000000000
--- a/docs/examples/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Ingress examples
-
-This directory contains a catalog of examples on how to run, configure and
-scale Ingress. Please review the [prerequisites](PREREQUISITES.md) before
-trying them.
-
-## Scaling
-
-Name | Description | Complexity Level
------| ----------- | ----------------
-Static-ip | a single ingress gets a single static ip | Intermediate
-
-## Algorithms
-
-Name | Description | Complexity Level
------| ----------- | ----------------
-Session stickyness | route requests consistently to the same endpoint | Advanced
-
-## Auth
-
-Name | Description | Complexity Level
------| ----------- | ----------------
-Basic auth | password protect your website | nginx | Intermediate
-[External auth plugin](external-auth/README.md) | defer to an external auth service | Intermediate
-
-## Customization
-
-Name | Description | Complexity Level
------| ----------- | ----------------
-configuration-snippets | customize nginx location configuration using annotations | Advanced
-custom-headers | set custom headers before send traffic to backends | Advanced
diff --git a/docs/examples/affinity/cookie/README.md b/docs/examples/affinity/cookie/README.md
index d92564b5ea..5ee8855d03 100644
--- a/docs/examples/affinity/cookie/README.md
+++ b/docs/examples/affinity/cookie/README.md
@@ -1,18 +1,21 @@
-# Sticky Session
+# Sticky sessions
 
-This example demonstrates how to achieve session affinity using cookies
+This example demonstrates how to achieve session affinity using cookies.
 
 ## Deployment
 
-Session stickyness is achieved through 3 annotations on the Ingress, as shown in the [example](sticky-ingress.yaml).
+
+Session affinity can be configured using the following annotations:
 
-|Name|Description|Values|
+|Name|Description|Value|
 | --- | --- | --- |
-|nginx.ingress.kubernetes.io/affinity|Sets the affinity type|string (in NGINX only ``cookie`` is possible|
-|nginx.ingress.kubernetes.io/session-cookie-name|Name of the cookie that will be used|string (default to route)|
-|nginx.ingress.kubernetes.io/session-cookie-hash|Type of hash that will be used in cookie value|sha1/md5/index|
+|nginx.ingress.kubernetes.io/affinity|Type of the affinity, set this to `cookie` to enable session affinity|string (NGINX only supports `cookie`)|
+|nginx.ingress.kubernetes.io/session-cookie-name|Name of the cookie that will be created|string (defaults to `INGRESSCOOKIE`)|
+|nginx.ingress.kubernetes.io/session-cookie-path|Path that will be set on the cookie (required if your [Ingress paths][ingress-paths] use regular expressions)|string (defaults to the currently [matched path][ingress-paths])|
+|nginx.ingress.kubernetes.io/session-cookie-max-age|Time until the cookie expires, corresponds to the `Max-Age` cookie directive|number of seconds|
+|nginx.ingress.kubernetes.io/session-cookie-expires|Legacy version of the previous annotation for compatibility with older browsers, generates an `Expires` cookie directive by adding the seconds to the current date|number of seconds|
+|nginx.ingress.kubernetes.io/session-cookie-change-on-failure|When set to `false`, nginx ingress will send requests to the upstream pointed to by the sticky cookie even if the previous attempt failed. When set to `true` and the previous attempt failed, the sticky cookie will be changed to point to another upstream.|`true` or `false` (defaults to `false`)|
 
-You can create the ingress to test this
+You can create the [example Ingress](ingress.yaml) to test this:
 
 ```console
 kubectl create -f ingress.yaml
@@ -20,7 +23,7 @@ kubectl create -f ingress.yaml
 
 ## Validation
 
-You can confirm that the Ingress works.
+You can confirm that the Ingress works:
 
 ```console
 $ kubectl describe ing nginx-test
@@ -35,8 +38,9 @@ Rules:
   / nginx-service:80 ()
 Annotations:
   affinity: cookie
-  session-cookie-hash: sha1
-  session-cookie-name: route
+  session-cookie-name: INGRESSCOOKIE
+  session-cookie-expires: 172800
+  session-cookie-max-age: 172800
 Events:
   FirstSeen LastSeen Count From          SubObjectPath  Type    Reason  Message
   --------- -------- ----- ----          -------------  ------  ------  -------
@@ -50,19 +54,22 @@ Date: Fri, 10 Feb 2017 14:11:12 GMT
 Content-Type: text/html
 Content-Length: 612
 Connection: keep-alive
-Set-Cookie: route=a9907b79b248140b56bb13723f72b67697baac3d; Path=/; HttpOnly
+Set-Cookie: INGRESSCOOKIE=a9907b79b248140b56bb13723f72b67697baac3d; Expires=Sun, 12-Feb-17 14:11:12 GMT; Max-Age=172800; Path=/; HttpOnly
 Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT
 ETag: "58875e6b-264"
 Accept-Ranges: bytes
 ```
 
-In the example above, you can see a line containing the 'Set-Cookie: route' setting the right defined stickyness cookie.
-This cookie is created by NGINX containing the hash of the used upstream in that request.
-If the user changes this cookie, NGINX creates a new one and redirect the user to another upstream.
-If the backend pool grows up NGINX will keep sending the requests through the same server of the first request, even if it's overloaded.
+In the example above, you can see that the response contains a `Set-Cookie` header with the settings we have defined.
+This cookie is created by NGINX; it contains a randomly generated key corresponding to the upstream used for that request (selected using [consistent hashing][consistent-hashing]) and has an `Expires` directive.
+If the user changes this cookie, NGINX creates a new one and redirects the user to another upstream.
 
-When the backend server is removed, the requests are then re-routed to another upstream server and NGINX creates a new cookie, as the previous hash became invalid.
+If the backend pool grows, NGINX will keep sending requests through the same server as the first request, even if it's overloaded.
 
-When you have more than one Ingress Object pointing to the same Service, but one containing affinity configuration and other don't, the first created Ingress will be used.
-This means that you can face the situation that you've configured Session Affinity in one Ingress and it doesn't reflects in NGINX configuration, because there is another Ingress Object pointing to the same service that doesn't configure this.
+When the backend server is removed, the requests are re-routed to another upstream server. This does not require the cookie to be updated because the key's [consistent hash][consistent-hashing] will change.
+When you have a Service pointing to more than one Ingress, with only one containing affinity configuration, the first created Ingress will be used.
+This means you can end up in a situation where you've configured session affinity on one Ingress and it doesn't work because the Service is pointing to another Ingress that doesn't configure it.
+
+[ingress-paths]: ../../../user-guide/ingress-path-matching
+[consistent-hashing]: https://en.wikipedia.org/wiki/Consistent_hashing
diff --git a/docs/examples/affinity/cookie/ingress.yaml b/docs/examples/affinity/cookie/ingress.yaml
index b44752804c..4abebfedfa 100644
--- a/docs/examples/affinity/cookie/ingress.yaml
+++ b/docs/examples/affinity/cookie/ingress.yaml
@@ -5,7 +5,8 @@ metadata:
   annotations:
     nginx.ingress.kubernetes.io/affinity: "cookie"
     nginx.ingress.kubernetes.io/session-cookie-name: "route"
-    nginx.ingress.kubernetes.io/session-cookie-hash: "sha1"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
+    nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
 spec:
   rules:
diff --git a/docs/examples/auth/basic/README.md b/docs/examples/auth/basic/README.md
index 54d068e3a3..15e0a7eeb6 100644
--- a/docs/examples/auth/basic/README.md
+++ b/docs/examples/auth/basic/README.md
@@ -1,6 +1,7 @@
 # Basic Authentication
 
 This example shows how to add authentication in a Ingress rule using a secret that contains a file generated with `htpasswd`.
+It's important that the generated file is named `auth` (actually, that the secret has a key `data.auth`); otherwise the ingress controller returns a 503.
 
 ```console
 $ htpasswd -c auth foo
@@ -38,8 +39,8 @@ metadata:
     nginx.ingress.kubernetes.io/auth-type: basic
     # name of the secret that contains the user/password definitions
     nginx.ingress.kubernetes.io/auth-secret: basic-auth
-    # message to display with an appropiate context why the authentication is required
-    nginx.ingress.kubernetes.io/auth-realm: "Authentication Required - foo"
+    # message to display with an appropriate context why the authentication is required
+    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo'
 spec:
   rules:
   - host: foo.bar.com
diff --git a/docs/examples/auth/client-certs/README.md b/docs/examples/auth/client-certs/README.md
index e69de29bb2..17b8bd6903 100644
--- a/docs/examples/auth/client-certs/README.md
+++ b/docs/examples/auth/client-certs/README.md
@@ -0,0 +1,53 @@
+# Client Certificate Authentication
+
+It is possible to enable Client-Certificate Authentication by adding additional annotations to your Ingress Resource.
+Before getting started you must have the following certificates set up:
+
+1. CA certificate and Key (Intermediate Certs need to be in the CA)
+2. Server Certificate (Signed by CA) and Key (CN should be equal to the hostname you will use)
+3. Client Certificate (Signed by CA) and Key
+
+For more details on the generation process, check out the Prerequisite [docs](../../PREREQUISITES.md#client-certificate-authentication).
+
+You can have as many certificates as you want. If they're in the binary DER format, you can convert them as follows:
+
+```bash
+openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem
+```
+
+Then, you can concatenate them all into one file, named 'ca.crt', as follows:
+
+```bash
+cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt
+```
+
+**Note:** Make sure that the Key Size is greater than 1024 and the Hashing Algorithm (Digest) is something better than md5
+for each certificate generated. Otherwise you will receive an error.
+
+## Creating Certificate Secrets
+
+There are many different ways of configuring your secrets to enable Client-Certificate
+Authentication to work properly.
+
+1. You can create a secret containing just the CA certificate and another
+   Secret containing the Server Certificate which is Signed by the CA.
+
+   ```bash
+   kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt
+   kubectl create secret generic tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key
+   ```
+
+2. You can create a secret containing CA certificate along with the Server
+   Certificate, that can be used for both TLS and Client Auth.
+
+   ```bash
+   kubectl create secret generic ca-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key --from-file=ca.crt=ca.crt
+   ```
+
+Note: The CA Certificate must contain the trusted certificate authority chain to verify client certificates.
+
+## Setup Instructions
+
+1. Add the annotations as provided in the [ingress.yaml](ingress.yaml) example to your own ingress resources as required.
+2. Test by performing a curl against the Ingress Path without the Client Cert and expect a Status Code 400.
+3. Test by performing a curl against the Ingress Path with the Client Cert and expect a Status Code 200 (a sketch of both checks follows below).
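+
+A minimal sketch of those two checks, assuming the hostname `mydomain.com` and the certificate file names generated in the prerequisites (adjust paths and DNS to your setup):
+
+```bash
+# Expect HTTP 400: no required SSL certificate was sent
+curl --cacert ca.crt https://mydomain.com/
+
+# Expect HTTP 200: the client certificate is presented
+curl --cacert ca.crt --cert client.crt --key client.key https://mydomain.com/
+```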
diff --git a/docs/examples/auth/client-certs/ingress.yaml b/docs/examples/auth/client-certs/ingress.yaml
new file mode 100644
index 0000000000..39dacce97b
--- /dev/null
+++ b/docs/examples/auth/client-certs/ingress.yaml
@@ -0,0 +1,29 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    # Enable client certificate authentication
+    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
+    # Create the secret containing the trusted ca certificates
+    nginx.ingress.kubernetes.io/auth-tls-secret: "default/ca-secret"
+    # Specify the verification depth in the client certificates chain
+    nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1"
+    # Specify an error page to be redirected to on verification errors
+    nginx.ingress.kubernetes.io/auth-tls-error-page: "http://www.mysite.com/error-cert.html"
+    # Specify if certificates are passed to upstream server
+    nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true"
+  name: nginx-test
+  namespace: default
+spec:
+  rules:
+  - host: mydomain.com
+    http:
+      paths:
+      - backend:
+          serviceName: http-svc
+          servicePort: 80
+        path: /
+  tls:
+  - hosts:
+    - mydomain.com
+    secretName: tls-secret
diff --git a/docs/examples/auth/client-certs/nginx-tls-auth.yaml b/docs/examples/auth/client-certs/nginx-tls-auth.yaml
deleted file mode 100644
index 2f44e031bd..0000000000
--- a/docs/examples/auth/client-certs/nginx-tls-auth.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  annotations:
-    # Create this with kubectl create secret generic caingress --from-file=ca.crt --namespace=default
-    nginx.ingress.kubernetes.io/auth-tls-secret: "default/caingress"
-    nginx.ingress.kubernetes.io/auth-tls-verify-depth: "3"
-    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
-    auth-tls-error-page: "http://www.mysite.com/error-cert.html"
-  name: nginx-test
-  namespace: default
-spec:
-  rules:
-  - host: ingress.test.com
-    http:
-      paths:
-      - backend:
-          serviceName: http-svc:80
-          servicePort: 80
-        path: /
-  tls:
-  - hosts:
-    - ingress.test.com
-    secretName: tls-secret
-
diff --git a/docs/examples/auth/external-auth/README.md b/docs/examples/auth/external-auth/README.md
index 18497d0c21..d1d6011c62 100644
--- a/docs/examples/auth/external-auth/README.md
+++ b/docs/examples/auth/external-auth/README.md
@@ -1,4 +1,4 @@
-# External authentication
+# External Basic Authentication
 
 ### Example 1:
 
@@ -7,9 +7,11 @@ Use an external service (Basic Auth) located in `https://httpbin.org`
 ```
 $ kubectl create -f ingress.yaml
 ingress "external-auth" created
+
 $ kubectl get ing external-auth
 NAME            HOSTS                         ADDRESS       PORTS     AGE
 external-auth   external-auth-01.sample.com   172.17.4.99   80        13s
+
 $ kubectl get ing external-auth -o yaml
 apiVersion: extensions/v1beta1
 kind: Ingress
@@ -21,7 +23,7 @@ metadata:
   name: external-auth
   namespace: default
   resourceVersion: "2068378"
-  selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/external-auth
+  selfLink: /apis/networking/v1beta1/namespaces/default/ingresses/external-auth
   uid: 5c388f1d-8970-11e6-9004-080027d2dc94
 spec:
   rules:
diff --git a/docs/examples/auth/oauth-external-auth/README.md b/docs/examples/auth/oauth-external-auth/README.md
new file mode 100644
index 0000000000..d6ad3fbed9
--- /dev/null
+++ b/docs/examples/auth/oauth-external-auth/README.md
@@ -0,0 +1,76 @@
+# External OAUTH Authentication
+
+### Overview
+
+The `auth-url` and `auth-signin` annotations allow you to use an external
+authentication provider to protect your Ingress resources.
+
+!!! Important
+    This annotation requires `nginx-ingress-controller v0.9.0` or greater.
+
+### Key Detail
+
+This functionality is enabled by deploying multiple Ingress objects for a single host.
+One Ingress object has no special annotations and handles authentication.
+
+Other Ingress objects can then be annotated in such a way that requires the user to
+authenticate against the first Ingress's endpoint, and can redirect `401`s to the
+same endpoint.
+
+Sample:
+
+```yaml
+...
+metadata:
+  name: application
+  annotations:
+    nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
+    nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
+...
+```
+
+### Example: OAuth2 Proxy + Kubernetes-Dashboard
+
+This example will show you how to deploy [`oauth2_proxy`](https://github.com/bitly/oauth2_proxy)
+into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using GitHub as the OAuth2 provider.
+
+#### Prepare
+
+1. Install the Kubernetes Dashboard
+
+```console
+kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.1.yaml
+```
+
+2. Create a [custom GitHub OAuth application](https://github.com/settings/applications/new)
+
+![Register OAuth2 Application](images/register-oauth-app.png)
+
+- Homepage URL is the FQDN in the Ingress rule, like `https://foo.bar.com`
+- Authorization callback URL is the same as the base FQDN plus `/oauth2`, like `https://foo.bar.com/oauth2`
+
+![Register OAuth2 Application](images/register-oauth-app-2.png)
+
+3. Configure oauth2_proxy values in the file oauth2-proxy.yaml with the values:
+
+- OAUTH2_PROXY_CLIENT_ID with the GitHub ``
+- OAUTH2_PROXY_CLIENT_SECRET with the GitHub ``
+- OAUTH2_PROXY_COOKIE_SECRET with value of `python -c 'import os,base64; print base64.b64encode(os.urandom(16))'`
+
+4. Customize the contents of the file dashboard-ingress.yaml:
+
+Replace `__INGRESS_HOST__` with a valid FQDN and `__INGRESS_SECRET__` with a Secret with a valid SSL certificate.
+
+5. Deploy the oauth2 proxy and the ingress rules by running:
+
+```console
+$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml
+```
+
+Test the OAuth integration by accessing the configured URL, like `https://foo.bar.com`
+
+![Register OAuth2 Application](images/github-auth.png)
+
+![Github authentication](images/oauth-login.png)
+
+![Kubernetes dashboard](images/dashboard.png)
diff --git a/docs/examples/external-auth/dashboard-ingress.yaml b/docs/examples/auth/oauth-external-auth/dashboard-ingress.yaml
similarity index 79%
rename from docs/examples/external-auth/dashboard-ingress.yaml
rename to docs/examples/auth/oauth-external-auth/dashboard-ingress.yaml
index 3980a76a0a..17a222939c 100644
--- a/docs/examples/external-auth/dashboard-ingress.yaml
+++ b/docs/examples/auth/oauth-external-auth/dashboard-ingress.yaml
@@ -2,8 +2,8 @@ apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
   annotations:
-    nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start
-    nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth
+    nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
+    nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
   name: external-auth-oauth2
   namespace: kube-system
 spec:
   rules:
diff --git a/docs/examples/external-auth/images/dashboard.png b/docs/examples/auth/oauth-external-auth/images/dashboard.png
similarity index 100%
rename from docs/examples/external-auth/images/dashboard.png
rename to docs/examples/auth/oauth-external-auth/images/dashboard.png
diff --git a/docs/examples/external-auth/images/github-auth.png b/docs/examples/auth/oauth-external-auth/images/github-auth.png
similarity index 100%
rename from docs/examples/external-auth/images/github-auth.png
rename to docs/examples/auth/oauth-external-auth/images/github-auth.png
diff --git a/docs/examples/external-auth/images/oauth-login.png b/docs/examples/auth/oauth-external-auth/images/oauth-login.png
similarity index 100%
rename from docs/examples/external-auth/images/oauth-login.png
rename to docs/examples/auth/oauth-external-auth/images/oauth-login.png
diff --git a/docs/examples/external-auth/images/register-oauth-app-2.png b/docs/examples/auth/oauth-external-auth/images/register-oauth-app-2.png
similarity index 100%
rename from docs/examples/external-auth/images/register-oauth-app-2.png
rename to docs/examples/auth/oauth-external-auth/images/register-oauth-app-2.png
diff --git a/docs/examples/external-auth/images/register-oauth-app.png b/docs/examples/auth/oauth-external-auth/images/register-oauth-app.png
similarity index 100%
rename from docs/examples/external-auth/images/register-oauth-app.png
rename to docs/examples/auth/oauth-external-auth/images/register-oauth-app.png
diff --git a/docs/examples/external-auth/oauth2-proxy.yaml b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml
similarity index 87%
rename from docs/examples/external-auth/oauth2-proxy.yaml
rename to docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml
index 1735f4690e..82549f40ec 100644
--- a/docs/examples/external-auth/oauth2-proxy.yaml
+++ b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml
@@ -28,7 +28,7 @@ spec:
         value: 
       - name: OAUTH2_PROXY_CLIENT_SECRET
         value: 
-        # python -c 'import os,base64; print base64.b64encode(os.urandom(16))'
+        # docker run -ti --rm python:3-alpine python -c 'import secrets,base64; print(base64.b64encode(base64.b64encode(secrets.token_bytes(16))));'
       - name: OAUTH2_PROXY_COOKIE_SECRET
         value: SECRET
       image: docker.io/colemickens/oauth2_proxy:latest
@@
-46,6 +46,7 @@ metadata: labels: k8s-app: oauth2-proxy name: oauth2-proxy + namespace: kube-system spec: ports: - name: http diff --git a/docs/examples/chashsubset/deployment.yaml b/docs/examples/chashsubset/deployment.yaml new file mode 100644 index 0000000000..5db8d3b375 --- /dev/null +++ b/docs/examples/chashsubset/deployment.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginxhello +spec: + replicas: 10 + selector: + matchLabels: + app: nginxhello + template: + metadata: + labels: + app: nginxhello + spec: + containers: + - name: nginxhello + image: gcr.io/kubernetes-e2e-test-images/echoserver:2.2 + ports: + - containerPort: 8080 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + +--- +kind: Service +apiVersion: v1 +metadata: + name: nginxhello + labels: + app: nginxhello +spec: + selector: + app: nginxhello + ports: + - name: http + port: 80 + targetPort: 8080 + +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/upstream-hash-by: "$arg_predictorid" + nginx.ingress.kubernetes.io/upstream-hash-by-subset: "true" + nginx.ingress.kubernetes.io/upstream-hash-by-subset-size: "3" + name: nginxhello-ingress + namespace: default +spec: + backend: + serviceName: nginxhello + servicePort: 80 + diff --git a/docs/examples/customization/configuration-snippets/README.md b/docs/examples/customization/configuration-snippets/README.md index 0e079c0451..11b713faf4 100644 --- a/docs/examples/customization/configuration-snippets/README.md +++ b/docs/examples/customization/configuration-snippets/README.md @@ -1,6 +1,8 @@ +# Configuration Snippets ## Ingress -The Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at [this example](/examples/customization/custom-headers/nginx). + +The Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at [this example](../custom-headers/README.md). 
```console
$ kubectl apply -f ingress.yaml
diff --git a/docs/examples/customization/configuration-snippets/ingress.yaml b/docs/examples/customization/configuration-snippets/ingress.yaml
index 3fe113968a..01af93ea26 100644
--- a/docs/examples/customization/configuration-snippets/ingress.yaml
+++ b/docs/examples/customization/configuration-snippets/ingress.yaml
@@ -4,8 +4,7 @@ metadata:
   name: nginx-configuration-snippet
   annotations:
     nginx.ingress.kubernetes.io/configuration-snippet: |
-      more_set_headers "Request-Id: $request_id";
-
+      more_set_headers "Request-Id: $req_id";
 spec:
   rules:
   - host: custom.configuration.com
diff --git a/docs/examples/customization/custom-configuration/README.md b/docs/examples/customization/custom-configuration/README.md
index 92a5331f48..3463e08127 100644
--- a/docs/examples/customization/custom-configuration/README.md
+++ b/docs/examples/customization/custom-configuration/README.md
@@ -1,5 +1,6 @@
+# Custom Configuration
 
-Using a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) is possible to customize the NGINX configuration
+Using a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) it is possible to customize the NGINX configuration.
 
 For example, if we want to change the timeouts we need to create a ConfigMap:
 
@@ -12,7 +13,7 @@ data:
   proxy-send-timeout: "120"
 kind: ConfigMap
 metadata:
-  name: nginx-load-balancer-conf
+  name: nginx-configuration
 ```
 
 ```
diff --git a/docs/examples/customization/custom-configuration/configmap.yaml b/docs/examples/customization/custom-configuration/configmap.yaml
index b5b5c02fd7..df2a84bdd4 100644
--- a/docs/examples/customization/custom-configuration/configmap.yaml
+++ b/docs/examples/customization/custom-configuration/configmap.yaml
@@ -4,7 +4,8 @@ metadata:
   name: nginx-configuration
   namespace: ingress-nginx
   labels:
-    app: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
 data:
   proxy-connect-timeout: "10"
   proxy-read-timeout: "120"
diff --git a/docs/examples/customization/custom-errors/README.md b/docs/examples/customization/custom-errors/README.md
index 080f59a831..49393f6589 100644
--- a/docs/examples/customization/custom-errors/README.md
+++ b/docs/examples/customization/custom-errors/README.md
@@ -1,80 +1,83 @@
-This example shows how is possible to use a custom backend to render custom error pages. The code of this example is located here [custom-error-pages](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors)
+# Custom Errors
+This example demonstrates how to use a custom backend to render custom error pages.
 
-The idea is to use the headers `X-Code` and `X-Format` that NGINX pass to the backend in case of an error to find out the best existent representation of the response to be returned. i.e. if the request contains an `Accept` header of type `json` the error should be in that format and not in `html` (the default in NGINX).
+## Customized default backend
 
-First create the custom backend to use in the Ingress controller
+First, create the custom `default-backend`. It will be used by the Ingress controller later on.
``` $ kubectl create -f custom-default-backend.yaml service "nginx-errors" created -replicationcontroller "nginx-errors" created +deployment.apps "nginx-errors" created ``` -``` -$ kubectl get svc -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -echoheaders 10.3.0.7 nodes 80/TCP 23d -kubernetes 10.3.0.1 443/TCP 34d -nginx-errors 10.3.0.102 80/TCP 11s -``` +This should have created a Deployment and a Service with the name `nginx-errors`. ``` -$ kubectl get rc -CONTROLLER REPLICAS AGE -echoheaders 1 19d -nginx-errors 1 19s -``` +$ kubectl get deploy,svc +NAME DESIRED CURRENT READY AGE +deployment.apps/nginx-errors 1 1 1 10s -Next create the Ingress controller executing -``` -$ kubectl create -f rc-custom-errors.yaml +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/nginx-errors ClusterIP 10.0.0.12 80/TCP 10s ``` -Now to check if this is working we use curl: +## Ingress controller configuration + +If you do not already have an instance of the NGINX Ingress controller running, deploy it according to the +[deployment guide][deploy], then follow these steps: + +1. Edit the `nginx-ingress-controller` Deployment and set the value of the `--default-backend` flag to the name of the + newly created error backend. + +2. Edit the `nginx-configuration` ConfigMap and create the key `custom-http-errors` with a value of `404,503`. + +3. Take note of the IP address assigned to the NGINX Ingress controller Service. + ``` + $ kubectl get svc ingress-nginx + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + ingress-nginx ClusterIP 10.0.0.13 80/TCP,443/TCP 10m + ``` + +!!! Note + The `ingress-nginx` Service is of type `ClusterIP` in this example. This may vary depending on your environment. + Make sure you can use the Service to reach NGINX before proceeding with the rest of this example. + +[deploy]: ../../../deploy/ + +## Testing error pages + +Let us send a couple of HTTP requests using cURL and validate everything is working as expected. + +A request to the default backend returns a 404 error with a custom message: ``` -$ curl -v http://172.17.4.99/ -* Trying 172.17.4.99... -* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) -> GET / HTTP/1.1 -> Host: 172.17.4.99 -> User-Agent: curl/7.43.0 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.10.0 -< Date: Wed, 04 May 2016 02:53:45 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Vary: Accept-Encoding -< -The page you're looking for could not be found. +$ curl -D- http://10.0.0.13/ +HTTP/1.1 404 Not Found +Server: nginx/1.13.12 +Date: Tue, 12 Jun 2018 19:11:24 GMT +Content-Type: */* +Transfer-Encoding: chunked +Connection: keep-alive -* Connection #0 to host 172.17.4.99 left intact +The page you're looking for could not be found. ``` -Specifying json as expected format: +A request with a custom `Accept` header returns the corresponding document type (JSON): ``` -$ curl -v http://172.17.4.99/ -H 'Accept: application/json' -* Trying 172.17.4.99... 
-* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
-> GET / HTTP/1.1
-> Host: 172.17.4.99
-> User-Agent: curl/7.43.0
-> Accept: application/json
->
-< HTTP/1.1 404 Not Found
-< Server: nginx/1.10.0
-< Date: Wed, 04 May 2016 02:54:00 GMT
-< Content-Type: text/html
-< Transfer-Encoding: chunked
-< Connection: keep-alive
-< Vary: Accept-Encoding
-<
-{ "message": "The page you're looking for could not be found" }
+$ curl -D- -H 'Accept: application/json' http://10.0.0.13/
+HTTP/1.1 404 Not Found
+Server: nginx/1.13.12
+Date: Tue, 12 Jun 2018 19:12:36 GMT
+Content-Type: application/json
+Transfer-Encoding: chunked
+Connection: keep-alive
+Vary: Accept-Encoding
 
-* Connection #0 to host 172.17.4.99 left intact
+{ "message": "The page you're looking for could not be found" }
 ```
+
+To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the
+responses are still in the correct format when a backend returns 503 (e.g. if you scale a Deployment down to 0 replicas).
diff --git a/docs/examples/customization/custom-errors/custom-default-backend.yaml b/docs/examples/customization/custom-errors/custom-default-backend.yaml
index fce7c0bcba..70096bdbec 100644
--- a/docs/examples/customization/custom-errors/custom-default-backend.yaml
+++ b/docs/examples/customization/custom-errors/custom-default-backend.yaml
@@ -1,31 +1,46 @@
+---
 apiVersion: v1
 kind: Service
 metadata:
   name: nginx-errors
   labels:
-    app: nginx-errors
+    app.kubernetes.io/name: nginx-errors
+    app.kubernetes.io/part-of: ingress-nginx
 spec:
+  selector:
+    app.kubernetes.io/name: nginx-errors
+    app.kubernetes.io/part-of: ingress-nginx
   ports:
   - port: 80
-    targetPort: 80
-    protocol: TCP
+    targetPort: 8080
     name: http
-  selector:
-    app: nginx-errors
 ---
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1
+kind: Deployment
 metadata:
   name: nginx-errors
+  labels:
+    app.kubernetes.io/name: nginx-errors
+    app.kubernetes.io/part-of: ingress-nginx
 spec:
   replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: nginx-errors
+      app.kubernetes.io/part-of: ingress-nginx
   template:
     metadata:
       labels:
-        app: nginx-errors
+        app.kubernetes.io/name: nginx-errors
+        app.kubernetes.io/part-of: ingress-nginx
     spec:
       containers:
-      - name: nginx-errors
-        image: aledbf/nginx-error-server:0.1
+      - name: nginx-error-server
+        image: quay.io/kubernetes-ingress-controller/custom-error-pages-amd64:0.3
         ports:
-        - containerPort: 80
\ No newline at end of file
+        - containerPort: 8080
+        # By setting the environment variable DEBUG we can see the headers sent
+        # by the ingress controller to the backend in the client response.
+ # env: + # - name: DEBUG + # value: "true" diff --git a/docs/examples/customization/custom-errors/rc-custom-errors.yaml b/docs/examples/customization/custom-errors/rc-custom-errors.yaml deleted file mode 100644 index e8d48c7e3e..0000000000 --- a/docs/examples/customization/custom-errors/rc-custom-errors.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx-ingress-controller - labels: - k8s-app: nginx-ingress-lb -spec: - replicas: 1 - selector: - k8s-app: nginx-ingress-lb - template: - metadata: - labels: - k8s-app: nginx-ingress-lb - name: nginx-ingress-lb - spec: - terminationGracePeriodSeconds: 60 - containers: - - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 - name: nginx-ingress-lb - imagePullPolicy: Always - readinessProbe: - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - livenessProbe: - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 1 - # use downward API - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - containerPort: 80 - hostPort: 80 - - containerPort: 443 - hostPort: 443 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/nginx-errors diff --git a/docs/examples/customization/custom-headers/README.md b/docs/examples/customization/custom-headers/README.md index 592c7ec881..7551abda31 100644 --- a/docs/examples/customization/custom-headers/README.md +++ b/docs/examples/customization/custom-headers/README.md @@ -1,19 +1,24 @@ -# Custom configuration +# Custom Headers -This example aims to demonstrate the deployment of an nginx ingress controller and -use a ConfigMap to configure a custom list of headers to be passed to the upstream -server +This example demonstrates configuration of the nginx ingress controller via +a ConfigMap to pass a custom list of headers to the upstream +server. + +[custom-headers.yaml](custom-headers.yaml) defines a ConfigMap in the `ingress-nginx` namespace named `custom-headers`, holding several custom X-prefixed HTTP headers. ```console -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml \ - | kubectl apply -f - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml +``` -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml \ - | kubectl apply -f - +[configmap.yaml](configmap.yaml) defines a ConfigMap in the `ingress-nginx` namespace named `nginx-configuration`. This controls the [global configuration](../../../user-guide/nginx-configuration/configmap.md) of the ingress controller, and already exists in a standard installation. The key `proxy-set-headers` is set to cite the previously-created `ingress-nginx/custom-headers` ConfigMap. +```console +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml ``` +The nginx ingress controller will read the `ingress-nginx/nginx-configuration` ConfigMap, find the `proxy-set-headers` key, read HTTP headers from the `ingress-nginx/custom-headers` ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends. 
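+
+Abridged, the global ConfigMap ties the two together roughly like this (a sketch of the shape; see [configmap.yaml](configmap.yaml) for the actual file):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-configuration
+  namespace: ingress-nginx
+data:
+  # references the ConfigMap holding the custom headers, in "namespace/name" form
+  proxy-set-headers: "ingress-nginx/custom-headers"
+```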
+ ## Test -Check the contents of the configmap is present in the nginx.conf file using: -`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf` +Check the contents of the ConfigMaps are present in the nginx.conf file using: +`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf` diff --git a/docs/examples/customization/custom-headers/configmap.yaml b/docs/examples/customization/custom-headers/configmap.yaml index 27fed44c85..133b0a1c81 100644 --- a/docs/examples/customization/custom-headers/configmap.yaml +++ b/docs/examples/customization/custom-headers/configmap.yaml @@ -6,4 +6,5 @@ metadata: name: nginx-configuration namespace: ingress-nginx labels: - app: ingress-nginx \ No newline at end of file + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/docs/examples/customization/custom-upstream-check/README.md b/docs/examples/customization/custom-upstream-check/README.md deleted file mode 100644 index 434c47c577..0000000000 --- a/docs/examples/customization/custom-upstream-check/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Custom Upstream server checks - -This example shows how is possible to create a custom configuration for a particular upstream associated with an Ingress rule. - -``` -echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: http-svc - annotations: - nginx.ingress.kubernetes.io/upstream-fail-timeout: "30" -spec: - rules: - - host: foo.bar.com - http: - paths: - - path: / - backend: - serviceName: http-svc - servicePort: 80 -" | kubectl create -f - -``` - -Check the annotation is present in the Ingress rule: -``` -kubectl get ingress http-svc -o yaml -``` - -Check the NGINX configuration is updated using kubectl or the status page: - -``` -$ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf -``` - -``` -.... - upstream default-http-svc-x-80 { - least_conn; - server 10.2.92.2:8080 max_fails=5 fail_timeout=30; - - } -.... -``` - - -![nginx-module-vts](custom-upstream.png "screenshot with custom configuration") diff --git a/docs/examples/customization/custom-upstream-check/custom-upstream.png b/docs/examples/customization/custom-upstream-check/custom-upstream.png deleted file mode 100644 index 30417894bf..0000000000 Binary files a/docs/examples/customization/custom-upstream-check/custom-upstream.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/README.md b/docs/examples/customization/custom-vts-metrics-prometheus/README.md deleted file mode 100644 index fe5deb7dcb..0000000000 --- a/docs/examples/customization/custom-vts-metrics-prometheus/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# Deploying the Nginx Ingress controller - -This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to enable [nginx vts module](https://github.com/vozlt/nginx-module-vts -) to export metrics in prometheus format. - -## vts-metrics - -Vts-metrics export NGINX metrics. To deploy all the files simply run `kubectl apply -f nginx`. A deployment and service will be -created which already has a `prometheus.io/scrape: 'true'` annotation and if you added -the recommended Prometheus service-endpoint scraping [configuration](https://raw.githubusercontent.com/prometheus/prometheus/master/documentation/examples/prometheus-kubernetes.yml), -Prometheus will scrape it automatically and you start using the generated metrics right away. 
- -## Custom configuration - -```console -apiVersion: v1 -data: - enable-vts-status: "true" -kind: ConfigMap -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx -``` - -```console -$ kubectl apply -f nginx-vts-metrics-conf.yaml -``` - -## Result - -Check whether the ingress controller successfully generated the NGINX vts status: - -```console -$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display - vhost_traffic_status_display; - vhost_traffic_status_display_format html; -``` - -### NGINX vts dashboard - -The vts dashboard provides real time metrics. - -![vts dashboard](imgs/vts-dashboard.png) - -Because the vts port it's not yet exposed, you should forward the controller port to see it. - -```console -$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n ingress-nginx --output=jsonpath={.items..metadata.name}) -n ingress-nginx 18080 -``` - -Now open the url [http://localhost:18080/nginx_status](http://localhost:18080/nginx_status) in your browser. - -### Prometheus metrics output - -NGINX Ingress controller already has a parser to convert vts metrics to Prometheus format. It exports prometheus metrics to the address `:10254/metrics`. - -```console -$ kubectl exec -ti -n ingress-nginx $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics -ingress_controller_ssl_expire_time_seconds{host="foo.bar.com"} -6.21355968e+10 -# HELP ingress_controller_success Cumulative number of Ingress controller reload operations -# TYPE ingress_controller_success counter -ingress_controller_success{count="reloads"} 3 -# HELP nginx_bytes_total Nginx bytes count -# TYPE nginx_bytes_total counter -nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="*"} 3708 -nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="_"} 3708 -nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="*"} 5256 -nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="_"} 5256 -``` - -### Customize metrics - -The default [vts vhost key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key) is `$geoip_country_code country::*` that expose metrics grouped by server and country code. The example below show how to have metrics grouped by server and server path. - -![vts dashboard](imgs/vts-dashboard-filter-key-path.png) - -## NGINX custom configuration ( http level ) - -``` - apiVersion: v1 - kind: ConfigMap - data: - enable-vts-status: "true" - vts-default-filter-key: "$server_name" -... 
-``` - -## Customize ingress - -``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - annotations: - nginx.ingress.kubernetes.io/vts-filter-key: $uri $server_name - name: ingress -``` - -## Result - -![prometheus filter key path](imgs/prometheus-filter-key-path.png) diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png deleted file mode 100644 index a266d40481..0000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png deleted file mode 100644 index b9b3238f6f..0000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png deleted file mode 100644 index 0370f5ce9f..0000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml b/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml deleted file mode 100644 index 6a6e795cd3..0000000000 --- a/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -data: - enable-vts-status: "true" -kind: ConfigMap -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx diff --git a/docs/examples/customization/external-auth-headers/Makefile b/docs/examples/customization/external-auth-headers/Makefile index 65875f426f..1ee7e06ac3 100644 --- a/docs/examples/customization/external-auth-headers/Makefile +++ b/docs/examples/customization/external-auth-headers/Makefile @@ -3,7 +3,7 @@ all: push TAG=0.1 PREFIX?=electroma/ingress-demo- ARCH?=amd64 -GOLANG_VERSION=1.8 +GOLANG_VERSION=1.9 TEMP_DIR:=$(shell mktemp -d) build: clean diff --git a/docs/examples/customization/external-auth-headers/README.md b/docs/examples/customization/external-auth-headers/README.md index a705dd2209..2ef1dd25d7 100644 --- a/docs/examples/customization/external-auth-headers/README.md +++ b/docs/examples/customization/external-auth-headers/README.md @@ -6,12 +6,12 @@ to backend service. 
Sample configuration includes: * Sample authentication service producing several response headers - * Authentication logic is based on HTTP header: requests with header `User` containing string `internal` are considered authenticated - * After successful authentication service generates response headers `UserID` and `UserRole` + * Authentication logic is based on HTTP header: requests with header `User` containing string `internal` are considered authenticated + * After successful authentication service generates response headers `UserID` and `UserRole` * Sample echo service displaying header information * Two ingress objects pointing to echo service - * Public, which allows access from unauthenticated users - * Private, which allows access from authenticated users only + * Public, which allows access from unauthenticated users + * Private, which allows access from authenticated users only You can deploy the controller as follows: @@ -21,18 +21,14 @@ $ kubectl create -f deploy/ deployment "demo-auth-service" created service "demo-auth-service" created ingress "demo-auth-service" created -deployment "default-http-backend" created -service "default-http-backend" created deployment "demo-echo-service" created service "demo-echo-service" created ingress "public-demo-echo-service" created ingress "secure-demo-echo-service" created -deployment "nginx-ingress-controller" created $ kubectl get po NAME READY STATUS RESTARTS AGE NAME READY STATUS RESTARTS AGE -default-http-backend-2657704409-vv0hm 1/1 Running 0 29s demo-auth-service-2769076528-7g9mh 1/1 Running 0 30s demo-echo-service-3636052215-3vw8c 1/1 Running 0 29s @@ -42,9 +38,9 @@ public-demo-echo-service public-demo-echo-service.kube.local 80 secure-demo-echo-service secure-demo-echo-service.kube.local 80 1m ``` - Test 1: public service with no auth header -``` + +```console $ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... @@ -64,8 +60,10 @@ $ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100 * Connection #0 to host 192.168.99.100 left intact UserID: , UserRole: ``` + Test 2: secure service with no auth header -``` + +```console $ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... @@ -91,8 +89,10 @@ $ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100 * Connection #0 to host 192.168.99.100 left intact ``` + Test 3: public service with valid auth header -``` + +```console $ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... @@ -113,9 +113,10 @@ $ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192. * Connection #0 to host 192.168.99.100 left intact UserID: 1443635317331776148, UserRole: admin ``` -Test 4: public service with valid auth header -``` +Test 4: secure service with valid auth header + +```console $ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... 
diff --git a/docs/examples/customization/external-auth-headers/deploy/auth-service.yaml b/docs/examples/customization/external-auth-headers/deploy/auth-service.yaml index 87a58730b2..af8f095181 100644 --- a/docs/examples/customization/external-auth-headers/deploy/auth-service.yaml +++ b/docs/examples/customization/external-auth-headers/deploy/auth-service.yaml @@ -7,6 +7,9 @@ metadata: namespace: default spec: replicas: 1 + selector: + matchLabels: + k8s-app: demo-auth-service template: metadata: labels: diff --git a/docs/examples/customization/external-auth-headers/deploy/default-backend.yaml b/docs/examples/customization/external-auth-headers/deploy/default-backend.yaml deleted file mode 100644 index a16193f939..0000000000 --- a/docs/examples/customization/external-auth-headers/deploy/default-backend.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-http-backend - labels: - k8s-app: default-http-backend - namespace: default -spec: - replicas: 1 - template: - metadata: - labels: - k8s-app: default-http-backend - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-http-backend - image: gcr.io/google_containers/defaultbackend:1.4 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: default-http-backend - namespace: default - labels: - k8s-app: default-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - k8s-app: default-http-backend diff --git a/docs/examples/customization/external-auth-headers/deploy/echo-service.yaml b/docs/examples/customization/external-auth-headers/deploy/echo-service.yaml index 7145a3a460..363c4bf3d5 100644 --- a/docs/examples/customization/external-auth-headers/deploy/echo-service.yaml +++ b/docs/examples/customization/external-auth-headers/deploy/echo-service.yaml @@ -7,6 +7,9 @@ metadata: namespace: default spec: replicas: 1 + selector: + matchLabels: + k8s-app: demo-echo-service template: metadata: labels: diff --git a/docs/examples/customization/external-auth-headers/deploy/nginx-ingress-controller.yaml b/docs/examples/customization/external-auth-headers/deploy/nginx-ingress-controller.yaml deleted file mode 100644 index 28eb850746..0000000000 --- a/docs/examples/customization/external-auth-headers/deploy/nginx-ingress-controller.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - labels: - k8s-app: nginx-ingress-controller - namespace: kube-system -spec: - replicas: 1 - template: - metadata: - labels: - k8s-app: nginx-ingress-controller - spec: - terminationGracePeriodSeconds: 60 - containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2 - name: nginx-ingress-controller - ports: - - containerPort: 80 - hostPort: 80 - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend diff --git a/docs/examples/customization/ssl-dh-param/README.md b/docs/examples/customization/ssl-dh-param/README.md index 90801867a6..e5167f1d20 100644 --- a/docs/examples/customization/ssl-dh-param/README.md +++ 
b/docs/examples/customization/ssl-dh-param/README.md @@ -1,4 +1,4 @@ -# Deploying the Nginx Ingress controller +# Custom DH parameters for perfect forward secrecy This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to configure custom Diffie-Hellman parameters file to help with @@ -16,7 +16,8 @@ metadata: name: nginx-configuration namespace: ingress-nginx labels: - app: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx ``` ```console @@ -40,7 +41,8 @@ metadata: name: nginx-configuration namespace: ingress-nginx labels: - app: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx ``` ```console diff --git a/docs/examples/customization/ssl-dh-param/configmap.yaml b/docs/examples/customization/ssl-dh-param/configmap.yaml index 71dd2903c0..f4382d9a8a 100644 --- a/docs/examples/customization/ssl-dh-param/configmap.yaml +++ b/docs/examples/customization/ssl-dh-param/configmap.yaml @@ -6,4 +6,5 @@ metadata: name: nginx-configuration namespace: ingress-nginx labels: - app: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/docs/examples/customization/sysctl/README.md b/docs/examples/customization/sysctl/README.md new file mode 100644 index 0000000000..909e129894 --- /dev/null +++ b/docs/examples/customization/sysctl/README.md @@ -0,0 +1,9 @@ +# Sysctl tuning + +This example aims to demonstrate the use of an Init Container to adjust sysctl default values +using `kubectl patch` + +```console +kubectl patch deployment -n ingress-nginx nginx-ingress-controller --patch="$(cat patch.json)" +``` + diff --git a/docs/examples/customization/sysctl/patch.json b/docs/examples/customization/sysctl/patch.json new file mode 100644 index 0000000000..56482f511b --- /dev/null +++ b/docs/examples/customization/sysctl/patch.json @@ -0,0 +1,16 @@ +{ + "spec": { + "template": { + "spec": { + "initContainers": [{ + "name": "sysctl", + "image": "alpine:3.6", + "securityContext": { + "privileged": true + }, + "command": ["sh", "-c", "sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range=1024 65535"] + }] + } + } + } +} \ No newline at end of file diff --git a/docs/examples/docker-registry/README.md b/docs/examples/docker-registry/README.md index 517b18298a..7dbd20cbda 100644 --- a/docs/examples/docker-registry/README.md +++ b/docs/examples/docker-registry/README.md @@ -10,9 +10,10 @@ First we deploy the docker registry in the cluster: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml ``` -**Important:** DO NOT RUN THIS IN PRODUCTION. -This deployment uses `emptyDir` in the `volumeMount` which means the contents of the registry will be deleted when the pod dies. +!!! Important + **DO NOT RUN THIS IN PRODUCTION** + This deployment uses `emptyDir` in the `volumeMount` which means the contents of the registry will be deleted when the pod dies. The next required step is creation of the ingress rules. To do this we have two options: with and without TLS @@ -24,8 +25,10 @@ Download and edit the yaml deployment replacing `registry.` with a wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml ``` -**Important:** running a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag. 
-Please check [deploy a plain http registry](https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry) +!!! Important + Running a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag. + + Please check [deploy a plain http registry](https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry) ### With TLS diff --git a/docs/examples/external-auth/README.md b/docs/examples/external-auth/README.md deleted file mode 100644 index 5d94e2d186..0000000000 --- a/docs/examples/external-auth/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# External Authentication - -### Overview - -The `auth-url` and `auth-signin` annotations allow you to use an external -authentication provider to protect your Ingress resources. - -(Note, this annotation requires `nginx-ingress-controller v0.9.0` or greater.) - -### Key Detail - -This functionality is enabled by deploying multiple Ingress objects for a single host. -One Ingress object has no special annotations and handles authentication. - -Other Ingress objects can then be annotated in such a way that require the user to -authenticate against the first Ingress's endpoint, and can redirect `401`s to the -same endpoint. - -Sample: - -```yaml -... -metadata: - name: application - annotations: - "nginx.ingress.kubernetes.io/auth-url": "https://$host/oauth2/auth" - "nginx.ingress.kubernetes.io/auth-signin": "https://$host/oauth2/sign_in" -... -``` - -### Example: OAuth2 Proxy + Kubernetes-Dashboard - -This example will show you how to deploy [`oauth2_proxy`](https://github.com/bitly/oauth2_proxy) -into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using github as oAuth2 provider - -#### Prepare - -1. Install the kubernetes dashboard - -```console -kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.5.0.yaml -``` - -2. Create a custom Github OAuth application https://github.com/settings/applications/new - -![Register OAuth2 Application](images/register-oauth-app.png) - -- Homepage URL is the FQDN in the Ingress rule, like `https://foo.bar.com` -- Authorization callback URL is the same as the base FQDN plus `/oauth2`, like `https://foo.bar.com/oauth2` - -![Register OAuth2 Application](images/register-oauth-app-2.png) - -3. Configure oauth2_proxy values in the file oauth2-proxy.yaml with the values: - -- OAUTH2_PROXY_CLIENT_ID with the github `` -- OAUTH2_PROXY_CLIENT_SECRET with the github `` -- OAUTH2_PROXY_COOKIE_SECRET with value of `python -c 'import os,base64; print base64.b64encode(os.urandom(16))'` - -4. Customize the contents of the file dashboard-ingress.yaml: - -Replace `__INGRESS_HOST__` with a valid FQDN and `__INGRESS_SECRET__` with a Secret with a valid SSL certificate. - -5. Deploy the oauth2 proxy and the ingress rules running: - -```console -$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml -``` - -Test the oauth integration accessing the configured URL, like `https://foo.bar.com` - -![Register OAuth2 Application](images/github-auth.png) - -![Github authentication](images/oauth-login.png) - -![Kubernetes dashboard](images/dashboard.png) diff --git a/docs/examples/grpc/README.md b/docs/examples/grpc/README.md new file mode 100644 index 0000000000..06dcb5664e --- /dev/null +++ b/docs/examples/grpc/README.md @@ -0,0 +1,104 @@ +# gRPC + +This example demonstrates how to route traffic to a gRPC service through the +nginx controller. + +## Prerequisites + +1. 
You have a kubernetes cluster running.
+2. You have a domain name such as `example.com` that is configured to route
+   traffic to the ingress controller. Replace references to
+   `fortune-teller.stack.build` (the domain name used in this example) with
+   your own domain name (you're also responsible for provisioning an SSL
+   certificate for the ingress).
+3. You have the nginx-ingress controller installed in typical fashion (must be
+   at least
+   [quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.13.0](https://quay.io/kubernetes-ingress-controller/nginx-ingress-controller)
+   for gRPC support).
+4. You have a backend application running a gRPC server and listening for TCP
+   traffic. If you prefer, you can use the
+   [fortune-teller](https://github.com/kubernetes/ingress-nginx/tree/master/images/grpc-fortune-teller)
+   application provided here as an example.
+
+### Step 1: kubernetes `Deployment`
+
+```sh
+$ kubectl create -f app.yaml
+```
+
+This is a standard kubernetes deployment object. It is running a gRPC service
+listening on port `50051`.
+
+The sample application
+[fortune-teller-app](https://github.com/kubernetes/ingress-nginx/tree/master/images/grpc-fortune-teller)
+is a gRPC server implemented in Go. Here's the stripped-down implementation:
+
+```go
+func main() {
+	grpcServer := grpc.NewServer()
+	fortune.RegisterFortuneTellerServer(grpcServer, &FortuneTeller{})
+	lis, _ := net.Listen("tcp", ":50051")
+	grpcServer.Serve(lis)
+}
+```
+
+The takeaway is that we are not doing any TLS configuration on the server (as we
+are terminating TLS at the ingress level, gRPC traffic will travel unencrypted
+inside the cluster and arrive "insecure").
+
+For your own application you may or may not want to do this. If you prefer to
+forward encrypted traffic to your pod and terminate TLS at the gRPC server
+itself, add the ingress annotation `nginx.ingress.kubernetes.io/backend-protocol: "GRPCS"`.
+
+### Step 2: the kubernetes `Service`
+
+```sh
+$ kubectl create -f svc.yaml
+```
+
+Here we have a typical service. Nothing special, just routing traffic to the
+backend application on port `50051`.
+
+### Step 3: the kubernetes `Ingress`
+
+```sh
+$ kubectl create -f ingress.yaml
+```
+
+A few things to note:
+
+1. We've tagged the ingress with the annotation
+   `nginx.ingress.kubernetes.io/backend-protocol: "GRPC"`. This is the magic
+   ingredient that sets up the appropriate nginx configuration to route http/2
+   traffic to our service.
+1. We're terminating TLS at the ingress and have configured an SSL certificate
+   `fortune-teller.stack.build`. The ingress matches traffic arriving as
+   `https://fortune-teller.stack.build:443` and routes unencrypted messages to
+   our kubernetes service.
+
+### Step 4: test the connection
+
+Once we've applied our configuration to kubernetes, it's time to test that we
+can actually talk to the backend. To do this, we'll use the
+[grpcurl](https://github.com/fullstorydev/grpcurl) utility:
+
+```sh
+$ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict
+{
+  "message": "Let us endeavor so to live that when we come to die even the undertaker will be sorry.\n\t\t-- Mark Twain, \"Pudd'nhead Wilson's Calendar\""
+}
+```
+
+### Debugging Hints
+
+1. Obviously, watch the logs on your app.
+2. Watch the logs for the nginx-ingress-controller (increasing verbosity as
+   needed).
+3. Double-check your address and ports.
+4. Set the `GODEBUG=http2debug=2` environment variable to get detailed http/2
+   logging on the client and/or server.
+5. Study RFC 7540 (http/2).
+
+> If you are developing public gRPC endpoints, check out
+> https://proto.stack.build, a protocol buffer / gRPC build service that you
+> can use to help make it easier for your users to consume your API.
diff --git a/docs/examples/grpc/app.yaml b/docs/examples/grpc/app.yaml
new file mode 100644
index 0000000000..04f1932d78
--- /dev/null
+++ b/docs/examples/grpc/app.yaml
@@ -0,0 +1,20 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: fortune-teller-app
+  labels:
+    k8s-app: fortune-teller-app
+  namespace: default
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: fortune-teller-app
+    spec:
+      containers:
+      - name: fortune-teller-app
+        image: quay.io/kubernetes-ingress-controller/grpc-fortune-teller:0.1
+        ports:
+        - containerPort: 50051
+          name: grpc
diff --git a/docs/examples/grpc/cert.yaml b/docs/examples/grpc/cert.yaml
new file mode 100644
index 0000000000..562c30313a
--- /dev/null
+++ b/docs/examples/grpc/cert.yaml
@@ -0,0 +1,7 @@
+apiVersion: "stable.k8s.psg.io/v1"
+kind: "Certificate"
+metadata:
+  name: fortune-teller.stack.build
+  namespace: default
+spec:
+  domain: "fortune-teller.stack.build"
diff --git a/docs/examples/grpc/ingress.yaml b/docs/examples/grpc/ingress.yaml
new file mode 100644
index 0000000000..02174c2dbe
--- /dev/null
+++ b/docs/examples/grpc/ingress.yaml
@@ -0,0 +1,24 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
+  name: fortune-ingress
+  namespace: default
+spec:
+  rules:
+  - host: fortune-teller.stack.build
+    http:
+      paths:
+      - backend:
+          serviceName: fortune-teller-service
+          servicePort: grpc
+  tls:
+  # This secret must exist beforehand
+  # The cert must also contain the subj-name fortune-teller.stack.build
+  # https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/PREREQUISITES.md#tls-certificates
+  - secretName: fortune-teller.stack.build
+    hosts:
+      - fortune-teller.stack.build
diff --git a/docs/examples/grpc/svc.yaml b/docs/examples/grpc/svc.yaml
new file mode 100644
index 0000000000..7bfa789fda
--- /dev/null
+++ b/docs/examples/grpc/svc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: fortune-teller-service
+  namespace: default
+spec:
+  selector:
+    k8s-app: fortune-teller-app
+  ports:
+  - port: 50051
+    targetPort: 50051
+    name: grpc
diff --git a/docs/examples/http-svc.yaml b/docs/examples/http-svc.yaml
index 58f3c527e6..aabe9a6527 100644
--- a/docs/examples/http-svc.yaml
+++ b/docs/examples/http-svc.yaml
@@ -4,6 +4,9 @@ metadata:
   name: http-svc
 spec:
   replicas: 1
+  selector:
+    matchLabels:
+      app: http-svc
   template:
     metadata:
       labels:
@@ -11,9 +14,26 @@ spec:
     spec:
       containers:
       - name: http-svc
-        image: gcr.io/google_containers/echoserver:1.8
+        image: gcr.io/kubernetes-e2e-test-images/echoserver:2.1
         ports:
         - containerPort: 8080
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
 ---
diff --git a/docs/examples/index.md b/docs/examples/index.md
new file mode 100644
index 0000000000..04f42ea60f
--- /dev/null
+++ 
b/docs/examples/index.md @@ -0,0 +1,24 @@ +# Ingress examples + +This directory contains a catalog of examples on how to run, configure and scale Ingress. +Please review the [prerequisites](PREREQUISITES.md) before trying them. + +Category | Name | Description | Complexity Level +---------| ---- | ----------- | ---------------- +Apps | [Docker Registry](docker-registry/README.md) | TODO | TODO +Auth | [Basic authentication](auth/basic/README.md) | password protect your website | Intermediate +Auth | [Client certificate authentication](auth/client-certs/README.md) | secure your website with client certificate authentication | Intermediate +Auth | [External authentication plugin](auth/external-auth/README.md) | defer to an external authentication service | Intermediate +Auth | [OAuth external auth](auth/oauth-external-auth/README.md) | TODO | TODO +Customization | [Configuration snippets](customization/configuration-snippets/README.md) | customize nginx location configuration using annotations | Advanced +Customization | [Custom configuration](customization/custom-configuration/README.md) | TODO | TODO +Customization | [Custom DH parameters for perfect forward secrecy](customization/ssl-dh-param/README.md) | TODO | TODO +Customization | [Custom errors](customization/custom-errors/README.md) | serve custom error pages from the default backend | Intermediate +Customization | [Custom headers](customization/custom-headers/README.md) | set custom headers before sending traffic to backends | Advanced +Customization | [External authentication with response header propagation](customization/external-auth-headers/README.md) | TODO | TODO +Customization | [Sysctl tuning](customization/sysctl/README.md) | TODO | TODO +Features | [Rewrite](rewrite/README.md) | TODO | TODO +Features | [Session stickiness](affinity/cookie/README.md) | route requests consistently to the same endpoint | Advanced +Scaling | [Static IP](static-ip/README.md) | a single ingress gets a single static IP | Intermediate +TLS | [Multi TLS certificate termination](multi-tls/README.md) | TODO | TODO +TLS | [TLS termination](tls-termination/README.md) | TODO | TODO diff --git a/docs/examples/multi-tls/README.md b/docs/examples/multi-tls/README.md index dceefe9e11..d1e7295d9b 100644 --- a/docs/examples/multi-tls/README.md +++ b/docs/examples/multi-tls/README.md @@ -4,7 +4,7 @@ This example uses 2 different certificates to terminate SSL for 2 hostnames. 1. Deploy the controller by creating the rc in the parent dir 2. Create tls secrets for foo.bar.com and bar.baz.com as indicated in the yaml -3. Create multi-tls.yaml +3. 
Create [multi-tls.yaml](multi-tls.yaml) This should generate a segment like: ```console diff --git a/docs/examples/multi-tls/multi-tls.yaml b/docs/examples/multi-tls/multi-tls.yaml index b503f620f3..f6ae876d00 100644 --- a/docs/examples/multi-tls/multi-tls.yaml +++ b/docs/examples/multi-tls/multi-tls.yaml @@ -3,7 +3,8 @@ kind: Service metadata: name: nginx labels: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: ports: - port: 80 @@ -11,18 +12,23 @@ spec: protocol: TCP name: http selector: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx --- apiVersion: v1 kind: ReplicationController metadata: name: nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: replicas: 1 template: metadata: labels: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: containers: - name: nginx @@ -35,7 +41,8 @@ kind: Service metadata: name: http-svc labels: - app: http-svc + app.kubernetes.io/name: http-svc + app.kubernetes.io/part-of: ingress-nginx spec: ports: - port: 80 @@ -43,22 +50,27 @@ spec: protocol: TCP name: http selector: - app: http-svc + app.kubernetes.io/name: http-svc + app.kubernetes.io/part-of: ingress-nginx --- apiVersion: v1 kind: ReplicationController metadata: name: http-svc + labels: + app.kubernetes.io/name: http-svc + app.kubernetes.io/part-of: ingress-nginx spec: replicas: 1 template: metadata: labels: - app: http-svc + app.kubernetes.io/name: http-svc + app.kubernetes.io/part-of: ingress-nginx spec: containers: - name: http-svc - image: gcr.io/google_containers/echoserver:1.8 + image: gcr.io/kubernetes-e2e-test-images/echoserver:2.1 ports: - containerPort: 8080 env: @@ -91,17 +103,13 @@ spec: - foo.bar.com # This secret must exist beforehand # The cert must also contain the subj-name foo.bar.com - # You can create it via: - # make keys secret SECRET=/tmp/foobar.json HOST=foo.bar.com NAME=foobar - # https://github.com/kubernetes/ingress/tree/master/controllers/gce/examples/https + # https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/PREREQUISITES.md#tls-certificates secretName: foobar - hosts: - bar.baz.com # This secret must exist beforehand # The cert must also contain the subj-name bar.baz.com - # You can create it via: - # make keys secret SECRET=/tmp/barbaz.json HOST=bar.baz.com NAME=barbaz - # https://github.com/kubernetes/ingress/tree/master/controllers/gce/examples/https + # https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/PREREQUISITES.md#tls-certificates secretName: barbaz rules: - host: foo.bar.com @@ -117,4 +125,4 @@ spec: - backend: serviceName: nginx servicePort: 80 - path: / \ No newline at end of file + path: / diff --git a/docs/examples/rewrite/README.md b/docs/examples/rewrite/README.md index c46e304e3b..e265738994 100644 --- a/docs/examples/rewrite/README.md +++ b/docs/examples/rewrite/README.md @@ -4,9 +4,9 @@ This example demonstrates how to use the Rewrite annotations ## Prerequisites -You will need to make sure you Ingress targets exactly one Ingress -controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class), -and that you have an ingress controller [running](/examples/deployment) in your cluster. 
+You will need to make sure your Ingress targets exactly one Ingress +controller by specifying the [ingress.class annotation](../../user-guide/multiple-ingress.md), +and that you have an ingress controller [running](../../deploy) in your cluster. ## Deployment @@ -15,25 +15,30 @@ Rewriting can be controlled using the following annotations: |Name|Description|Values| | --- | --- | --- | |nginx.ingress.kubernetes.io/rewrite-target|Target URI where the traffic must be redirected|string| -|nginx.ingress.kubernetes.io/add-base-url|indicates if is required to add a base tag in the head of the responses from the upstream servers|bool| -|nginx.ingress.kubernetes.io/base-url-scheme|Override for the scheme passed to the base tag|string| |nginx.ingress.kubernetes.io/ssl-redirect|Indicates if the location section is accessible SSL only (defaults to True when Ingress contains a Certificate)|bool| |nginx.ingress.kubernetes.io/force-ssl-redirect|Forces the redirection to HTTPS even if the Ingress is not TLS Enabled|bool| -|nginx.ingress.kubernetes.io/app-root|Defines the Application Root that the Controller must redirect if it's not in '/' context|string| +|nginx.ingress.kubernetes.io/app-root|Defines the Application Root that the Controller must redirect if it's in '/' context|string| +|nginx.ingress.kubernetes.io/use-regex|Indicates if the paths defined on an Ingress use regular expressions|bool| -## Validation +## Examples ### Rewrite Target +!!! attention + Starting in Version 0.22.0, ingress definitions using the annotation `nginx.ingress.kubernetes.io/rewrite-target` are not backwards compatible with previous versions. In Version 0.22.0 and beyond, any substrings within the request URI that need to be passed to the rewritten path must explicitly be defined in a [capture group](https://www.regular-expressions.info/refcapture.html). + +!!! note + [Captured groups](https://www.regular-expressions.info/refcapture.html) are saved in numbered placeholders, chronologically, in the form `$1`, `$2` ... `$n`. These placeholders can be used as parameters in the `rewrite-target` annotation. + Create an Ingress rule with a rewrite annotation: ```console -$ echo " +$ echo ' apiVersion: extensions/v1beta1 kind: Ingress metadata: annotations: - nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/rewrite-target: /$2 name: rewrite namespace: default spec: @@ -44,53 +49,16 @@ spec: - backend: serviceName: http-svc servicePort: 80 - path: /something -" | kubectl create -f - + path: /something(/|$)(.*) +' | kubectl create -f - ``` -Check the rewrite is working +In this ingress definition, any characters captured by `(.*)` will be assigned to the placeholder `$2`, which is then used as a parameter in the `rewrite-target` annotation. -``` -$ curl -v http://172.17.4.99/something -H 'Host: rewrite.bar.com' -* Trying 172.17.4.99... 
-* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) -> GET /something HTTP/1.1 -> Host: rewrite.bar.com -> User-Agent: curl/7.43.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Server: nginx/1.11.0 -< Date: Tue, 31 May 2016 16:07:31 GMT -< Content-Type: text/plain -< Transfer-Encoding: chunked -< Connection: keep-alive -< -CLIENT VALUES: -client_address=10.2.56.9 -command=GET -real path=/ -query=nil -request_version=1.1 -request_uri=http://rewrite.bar.com:8080/ - -SERVER VALUES: -server_version=nginx: 1.9.11 - lua: 10001 - -HEADERS RECEIVED: -accept=*/* -connection=close -host=rewrite.bar.com -user-agent=curl/7.43.0 -x-forwarded-for=10.2.56.1 -x-forwarded-host=rewrite.bar.com -x-forwarded-port=80 -x-forwarded-proto=http -x-real-ip=10.2.56.1 -BODY: -* Connection #0 to host 172.17.4.99 left intact --no body in request- -``` +For example, the ingress definition above will result in the following rewrites: +- `rewrite.bar.com/something` rewrites to `rewrite.bar.com/` +- `rewrite.bar.com/something/` rewrites to `rewrite.bar.com/` +- `rewrite.bar.com/something/new` rewrites to `rewrite.bar.com/new` ### App Root diff --git a/docs/examples/static-ip/README.md b/docs/examples/static-ip/README.md index 988968956d..68210fe870 100644 --- a/docs/examples/static-ip/README.md +++ b/docs/examples/static-ip/README.md @@ -4,10 +4,10 @@ This example demonstrates how to assign a static-ip to an Ingress on through the ## Prerequisites -You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example. -You will also need to make sure you Ingress targets exactly one Ingress -controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class), -and that you have an ingress controller [running](/examples/deployment) in your cluster. +You need a [TLS cert](../PREREQUISITES.md#tls-certificates) and a [test HTTP service](../PREREQUISITES.md#test-http-service) for this example. +You will also need to make sure your Ingress targets exactly one Ingress +controller by specifying the [ingress.class annotation](../../user-guide/multiple-ingress.md), +and that you have an ingress controller [running](../../deploy) in your cluster. ## Acquiring an IP @@ -79,9 +79,9 @@ NAME HOSTS ADDRESS PORTS AGE nginx-ingress * 104.154.109.191 80, 443 13m ``` -Note that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all -Ingresses, because all requests are proxied through the same set of nginx -controllers. +> Note that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all +> Ingresses, because all requests are proxied through the same set of nginx +> controllers. 
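+
+For illustration, a minimal sketch of a second Ingress (the name and backend here are hypothetical, reusing the `http-svc` test service) that would surface that same shared address once created through this controller:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  # hypothetical example object, not part of this guide's manifests
+  name: second-ingress
+spec:
+  backend:
+    serviceName: http-svc
+    servicePort: 80
+```
+
+Because every Ingress handled by this controller is served by the same set of nginx pods, its `ADDRESS` column would show the same IP as `nginx-ingress` above.
+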
## Promote ephemeral to static IP diff --git a/docs/examples/static-ip/nginx-ingress-controller.yaml b/docs/examples/static-ip/nginx-ingress-controller.yaml index c5dfd8cb8e..18ed2d4670 100644 --- a/docs/examples/static-ip/nginx-ingress-controller.yaml +++ b/docs/examples/static-ip/nginx-ingress-controller.yaml @@ -3,13 +3,19 @@ kind: Deployment metadata: name: nginx-ingress-controller labels: - k8s-app: nginx-ingress-controller + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx template: metadata: labels: - k8s-app: nginx-ingress-controller + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host @@ -18,7 +24,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.19 + - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.18.0 name: nginx-ingress-controller readinessProbe: httpGet: @@ -48,5 +54,4 @@ spec: fieldPath: metadata.namespace args: - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb diff --git a/docs/examples/static-ip/static-ip-svc.yaml b/docs/examples/static-ip/static-ip-svc.yaml index 57d02d1aca..b64cf96cba 100644 --- a/docs/examples/static-ip/static-ip-svc.yaml +++ b/docs/examples/static-ip/static-ip-svc.yaml @@ -3,11 +3,11 @@ apiVersion: v1 kind: Service metadata: name: nginx-ingress-lb - annotations: - service.beta.kubernetes.io/external-traffic: OnlyLocal labels: - app: nginx-ingress-lb + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: + externalTrafficPolicy: Local type: LoadBalancer loadBalancerIP: 104.154.109.191 ports: @@ -19,5 +19,5 @@ spec: targetPort: 443 selector: # Selects nginx-ingress-controller pods - k8s-app: nginx-ingress-controller - + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/docs/examples/tls-termination/README.md b/docs/examples/tls-termination/README.md index 849cde1158..a4e20e73b5 100644 --- a/docs/examples/tls-termination/README.md +++ b/docs/examples/tls-termination/README.md @@ -8,6 +8,31 @@ You need a [TLS cert](../PREREQUISITES.md#tls-certificates) and a [test HTTP ser ## Deployment +Create a `values.yaml` file. + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: nginx-test +spec: + tls: + - hosts: + - foo.bar.com + # This assumes tls-secret exists and the SSL + # certificate contains a CN for foo.bar.com + secretName: tls-secret + rules: + - host: foo.bar.com + http: + paths: + - path: / + backend: + # This assumes http-svc exists and routes to healthy endpoints + serviceName: http-svc + servicePort: 80 +``` + The following command instructs the controller to terminate traffic using the provided TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service. 
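+
+A minimal sketch of that command, assuming the Ingress manifest above was saved as `values.yaml`:
+
+```console
+kubectl create -f values.yaml
+```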
diff --git a/docs/extra.css b/docs/extra.css
new file mode 100644
index 0000000000..713e0f65a7
--- /dev/null
+++ b/docs/extra.css
@@ -0,0 +1,10 @@
+.md-typeset__table {
+  min-width: 100%;
+}
+
+@media only screen and (min-width: 768px)
+{
+  td:nth-child(1){
+    white-space: nowrap;
+  }
+}
diff --git a/docs/how-it-works.md b/docs/how-it-works.md
new file mode 100644
index 0000000000..b69df31ed3
--- /dev/null
+++ b/docs/how-it-works.md
@@ -0,0 +1,74 @@
+# How it works
+
+The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.
+
+## NGINX configuration
+
+The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. _Though it is important to note that we don't reload Nginx on changes that impact only an `upstream` configuration (i.e., Endpoints change when you deploy your app)_. We use https://github.com/openresty/lua-nginx-module to achieve this. Check [below](#avoiding-reloads-on-endpoints-changes) to learn more about how it's done.
+
+## NGINX model
+
+Usually, a Kubernetes Controller utilizes the [synchronization loop pattern][1] to check if the desired state in the controller is updated or a change is required. For this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps, to generate a point-in-time configuration file that reflects the state of the cluster.
+
+To get these objects from the cluster, we use [Kubernetes Informers][2], in particular `FilteredSharedInformer`. These informers allow reacting to changes using [callbacks][3] when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so, we then send the new list of Endpoints to a Lua handler running inside Nginx using an HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between the running and new models involves more than just Endpoints, we create a new NGINX configuration based on the new model, replace the current model and trigger a reload.
+
+One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions.
+
+The final representation of the NGINX configuration is generated from a [Go template][6] using the new model as input for the variables required by the template.
+
+## Building the NGINX model
+
+Building a model is an expensive operation; for this reason, the use of the synchronization loop is a must. By using a [work queue][4] it is possible not to lose changes and to remove the use of [sync.Mutex][5] to force a single execution of the sync loop. Additionally, it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller; this is one of the reasons for the [work queue][4].
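+
+As a minimal sketch of that pattern (not the controller's actual code; the real queue implementation is linked as [4]), a single-worker sync loop built on client-go's `workqueue` package, with hypothetical event markers, looks roughly like this:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/util/workqueue"
+)
+
+func main() {
+	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+
+	// Informer callbacks only enqueue a marker for each event; bursts of
+	// events collapse into a small number of queued items.
+	go func() {
+		for _, evt := range []string{"endpoints-update", "ingress-update"} {
+			queue.Add(evt)
+		}
+		queue.ShutDown()
+	}()
+
+	// A single worker drains the queue, so the model is never rebuilt
+	// concurrently and no sync.Mutex is required.
+	for {
+		item, shutdown := queue.Get()
+		if shutdown {
+			return
+		}
+		// The real controller rebuilds the model from the cluster state here,
+		// compares it with the running model and reloads NGINX only on change.
+		fmt.Printf("sync triggered by %v\n", item)
+		queue.Forget(item)
+		queue.Done(item)
+	}
+}
+```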
+
+Operations to build the model:
+
+- Order Ingress rules by `CreationTimestamp` field, i.e., old rules first.
+
+  - If the same path for the same host is defined in more than one Ingress, the oldest rule wins.
+  - If more than one Ingress contains a TLS section for the same host, the oldest rule wins.
+  - If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins.
+
+- Create a list of NGINX Servers (per hostname)
+- Create a list of NGINX Upstreams
+- If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions.
+- Annotations are applied to all the paths in the Ingress.
+- Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses.
+
+## When a reload is required
+
+The next list describes the scenarios when a reload is required:
+
+- A new Ingress Resource is created.
+- A TLS section is added to an existing Ingress.
+- A change in Ingress annotations that impacts more than just the upstream configuration. For instance, the `load-balance` annotation does not require a reload.
+- A path is added/removed from an Ingress.
+- An Ingress, Service, or Secret is removed.
+- A previously missing object referenced from the Ingress becomes available, like a Service or Secret.
+- A Secret is updated.
+
+## Avoiding reloads
+
+In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. This can change only if NGINX changes the way new configurations are read, so that applying a new configuration does not replace the worker processes.
+
+### Avoiding reloads on Endpoints changes
+
+On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request the Lua code running in the [`balancer_by_lua`](https://github.com/openresty/lua-resty-core/blob/master/lib/ngx/balancer.md) context detects which endpoints it should choose the upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. _Note_ that this includes annotation changes that affect only the `upstream` configuration in Nginx as well.
+
+In relatively big clusters with frequently deploying apps, this feature saves a significant number of Nginx reloads, which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing), and so on.
+
+### Avoiding outage from wrong configuration
+
+Because the ingress controller works using the [synchronization loop pattern](https://coreos.com/kubernetes/docs/latest/replication-controller.html#the-reconciliation-loop-in-detail), it applies the configuration for all matching objects.
In case some Ingress objects have a broken configuration, for example a syntax error in the `nginx.ingress.kubernetes.io/configuration-snippet` annotation, the generated configuration becomes invalid, NGINX does not reload, and hence no further Ingresses will be taken into account.
+
+To prevent this situation from happening, the nginx ingress controller optionally exposes a [validating admission webhook server][8] to ensure the validity of incoming ingress objects.
+This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.
+
+[0]: https://github.com/openresty/lua-nginx-module/pull/1259
+[1]: https://coreos.com/kubernetes/docs/latest/replication-controller.html#the-reconciliation-loop-in-detail
+[2]: https://godoc.org/k8s.io/client-go/informers#NewFilteredSharedInformerFactory
+[3]: https://godoc.org/k8s.io/client-go/tools/cache#ResourceEventHandlerFuncs
+[4]: https://github.com/kubernetes/ingress-nginx/blob/master/internal/task/queue.go#L38
+[5]: https://golang.org/pkg/sync/#Mutex
+[6]: https://github.com/kubernetes/ingress-nginx/blob/master/rootfs/etc/nginx/template/nginx.tmpl
+[7]: http://nginx.org/en/docs/beginners_guide.html#control
+[8]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook
diff --git a/docs/images/baremetal/baremetal_overview.gliffy b/docs/images/baremetal/baremetal_overview.gliffy
new file mode 100644
index 0000000000..40ccaa4c99
--- /dev/null
+++ b/docs/images/baremetal/baremetal_overview.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":464,"nodeIndex":367,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":464}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[189.00264548413074,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Bare-metal environment

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":57,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":56,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":54,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":547.0,"y":626.5,"rotation":0.0,"id":351,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":62,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#666666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-160.13557569954526,-516.5],[-160.13557569954526,-501.83321142036345],[-160.13557569954526,-487.1664228407269],[-160.13557569954526,-472.49963426109036]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":367.04382652400767,"y":154.0,"rotation":0.0,"id":353,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.0310909671214,"y":265.5,"rotation":0.0,"id":337,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","orde
r":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":204.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":17,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV179pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFuGHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoW
nkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsalX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJSXhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOHr+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},
"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":273.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":47,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":273.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":50,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y"
:56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":37,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":33,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":273.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":51,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,
"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":273.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":52,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":15,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":11,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShap
e":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":383.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
master
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":291.5310909671214,"y":383.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":383.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":60,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":383.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":336.86442430045474,"y":162.0,"rotation":0.0,"id":244,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":65,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#ff1745","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":362,"width":96.0,"height":78.0,"uid":null,"order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
?
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":68}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#ff1745","strokeWidth":2,"endArrow":17,"orthoMode":0}},"textStyles":{"global":{"bold":true,"face":"Bangers","size":"72px","color":"#ffffff"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536076015434,"libraries":["com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/baremetal/baremetal_overview.jpg b/docs/images/baremetal/baremetal_overview.jpg new file mode 100644 index 0000000000..ecf19d5512 Binary files /dev/null and b/docs/images/baremetal/baremetal_overview.jpg differ diff --git a/docs/images/baremetal/cloud_overview.gliffy b/docs/images/baremetal/cloud_overview.gliffy new file mode 100644 index 0000000000..9825985461 --- /dev/null +++ b/docs/images/baremetal/cloud_overview.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":547,"nodeIndex":358,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":547}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":129,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[146.0,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":127,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
Cloud environment
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.6388145443572,"y":190.0,"rotation":0.0,"id":341,"width":85.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":121,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
Cloud Load Balancer
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":382.0,"y":258.0,"rotation":0.0,"id":332,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":118,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":352,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":334,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ff9100","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.685022076901703,9.0],[4.685022076901703,49.75],[98.197757633788,49.75],[98.197757633788,90.5]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":356.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":54,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":52,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":48,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":fal
se,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":356.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":42,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":38,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":287.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":53,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iI
Hg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV179pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFuGHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoWnkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsalX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa
+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJSXhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOHr+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":342.86442430045474,"y":160.0,"rotation":0.0,"id":309,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":112,"lockAspectRatio":false,"lockShape":false,"children":[{"x":41.45900267858218,"y":42.043467278595216,"rotation":315.0,"id":303,"width":34.19748871496461,"height":34.46423803734757,"uid":"com.gliffy.shape.basic.basic_v1.default.gr
oup","order":112,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-3.460796792227029,"y":-61.40762145606963,"rotation":320.0,"id":304,"width":212.0,"height":12.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":100,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#ffffff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5000000000002558,-1.7053025658242404E-13],[-11.786227735623157,13.450402450764898]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":8.709929813125086,"y":19.35027714058682,"rotation":0.0,"id":305,"width":16.77762908871432,"height":11.763683756173897,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":111,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-0.6111854556428398,"y":3.881841878086945,"rotation":50.0,"id":306,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":108,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":5.38881454435716,"y":3.881841878086945,"rotation":310.0,"id":307,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":104,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":12.343508606453213,"y":42.04346727859516,"rotation":45.0,"id":298,"width":34.19748871496461,"height":34.46423803734757,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":96,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-3.460796792227029,"y":-61.40762145606963,"rotation":320.0,"id":299,"width":212.0,"height":12.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#ffffff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5000000000002558,-1.7053025658242404E-13],[-11.786227735623157,13.450402450764898]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":8.709929813125086,"y":19.35027714058682,"rotation":0.0,"id":300,"width":16.77762908871432,"height":11.763683756173897,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":95,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-0.6111854556428398,"y":3.881841878086945,"rotation":50.0,"id":301,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":92,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeCo
lor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":5.38881454435716,"y":3.881841878086945,"rotation":310.0,"id":302,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":88,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":26.901255642517697,"y":48.0000000000001,"rotation":0.0,"id":297,"width":34.19748871496461,"height":34.46423803734757,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":79,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-3.460796792227029,"y":-61.40762145606963,"rotation":320.0,"id":286,"width":212.0,"height":12.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#ffffff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5000000000002558,-1.7053025658242404E-13],[-11.786227735623157,13.450402450764898]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":8.709929813125086,"y":19.35027714058682,"rotation":0.0,"id":296,"width":16.77762908871432,"height":11.763683756173897,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":77,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-0.6111854556428398,"y":3.881841878086945,"rotation":50.0,"id":284,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":5.38881454435716,"y":3.881841878086945,"rotation":310.0,"id":285,"width":12.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.0,"y":42.0,"rotation":0.0,"id":250,"width":16.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":74,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":5.0,"strokeColor":"#ffffff","fillColor":"#ff9100","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":fals
e,"layerId":"mcqBzoFMGdMI"},{"x":45.5,"y":20.000000000000004,"rotation":0.0,"id":256,"width":1.0,"height":34.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":61,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":253,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":250,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#ffffff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,7.0],[-1.5,18.5],[-1.5,18.5],[-1.5,30.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.0,"y":19.000000000000004,"rotation":0.0,"id":253,"width":16.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":0.0,"strokeColor":"#ffffff","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0000000000000036,"rotation":90.0,"id":244,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#ff9100","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":117,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":116,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":114,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"das
hStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":356.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":56,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":28,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.0310909671214,"y":348.5,"rotation":0.0,"id":337,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":607.3644243004547,"y":348.5,"rotation":0.0,"id":338,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic
.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":460.197757633788,"y":348.5,"rotation":0.0,"id":334,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":392.0,"y":268.0,"rotation":0.0,"id":339,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":119,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":352,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":338,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ff9100","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-5.314977923098297,-1.0],[-5.314977923098297,39.75],[235.36442430045474,39.75],[235.36442430045474,80.5]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":537.0,"y":616.5,"rotation":0.0,"id":340,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":120,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":352,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":337,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ff9100","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-150.3149779230983,-349.5],[-150.3149779230983,-308.75],[-202.96890903287863,-308.75],[-202.96890903287863,-268.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":356.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":57,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":18,"l
ockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":16,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":466.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":122,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
master
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":291.5310909671214,"y":466.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":123,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":466.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":124,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":466.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":125,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":547.0,"y":626.5,"rotation":0.0,"id":351,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":126,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#666666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-160.13557569954526,-516.5],[-160.13557569954526,-501.83321142036345],[-160.13557569954526,-487.1664228407269],[-160.13557569954526,-472.49963426109036]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":366.6850220769017,"y":251.0,"rotation":0.0,"id":352,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":367.04382652400767,"y":154.0,"rotation":0.0,"id":353,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":131}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":2,"endArrow":0,"orthoMode":0}},"textStyles":{"global":{"bold":true,"face":"Roboto Slab","size":"16px","color":"#999999"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536074379631,"libraries":["com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/baremetal/cloud_overview.jpg b/docs/images/baremetal/cloud_overview.jpg new file mode 100644 index 0000000000..ec8830e883 Binary files /dev/null and b/docs/images/baremetal/cloud_overview.jpg differ diff --git a/docs/images/baremetal/hostnetwork.gliffy b/docs/images/baremetal/hostnetwork.gliffy new file mode 100644 index 0000000000..952ea78161 --- /dev/null 
+++ b/docs/images/baremetal/hostnetwork.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":442,"nodeIndex":465,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":442}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":438.6388145443572,"y":293.0,"rotation":90.0,"id":388,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":356.5310909671214,"y":293.0,"rotation":90.0,"id":389,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":182.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":44,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":13,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV179pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9
fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFuGHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoWnkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsalX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJSXhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7
ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOHr+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":251.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":47,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":49,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"sh
adowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":251.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":50,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":37,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.h
exagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":251.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":51,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":29,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":537.0,"y":616.5,"rotation":0.0,"id":340,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":334,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00bfa6","fillColor":"none","d
ashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-150.13557569954526,-506.5],[-150.13557569954526,-459.25],[-56.802242366212,-459.25],[-56.802242366212,-382.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":361.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
master
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":291.5310909671214,"y":361.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":361.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":62,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":361.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
Via the host network
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[158.00316452527144,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":251.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":68,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":21,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":33,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":452.197757633788,"y":271.0,"rotation":0.0,"id":383,"width":53.00000000000006,"height":60.0,"uid":"com.gliff
y.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":380,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":377,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":74,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":56,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":65,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":61,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":160.36442430045471,"y":271.5,"rotation":0.0,"id":434,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":72,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":435,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":436,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":307.0310909671214,"y":274.0,"rotation":0.0,"id":437,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":77,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":438,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":439,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":84,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":600.8644243004546,"y":271.5,"rotation":0.0,"id":440,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":80,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":441,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":442,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":88,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":607.3644243004547,"y":234.5,"rotation":0.0,"id":338,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":613.728060664091,"y":217.04551732809568,"rotation":0.0,"id":454,"width":84.49629087449557,"height":52.954482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":93,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":21.954482671904316,"rotation":0.0,"id":448,"width":27.272727272727273,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":92,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.636363636363626,"y":16.977241335952158,"rotation":29.999999999999996,"id":390,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":460.197757633788,"y":234.5,"rotation":0.0,"id":334,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.0310909671214,"y":234.5,"rotation":0.0,"id":337,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":169.36442430045474,"y":234.5,"rotation":0.0,"id":365,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":466.6388145443572,"y":218.54551732809568,"rotation":0.0,"id":456,"width":84.49629087449557,"height":52.954482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":94,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":21.954482671904316,"rotation":0.0,"id":457,"width":27.272727272727273,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":98,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.636363636363626,"y":16.977241335952158,"rotation":29.999999999999996,"id":458,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":320.7829455298736,"y":217.04551732809568,"rotation":0.0,"id":459,"width":84.49629087449557,"height":52.954482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":21.954482671904316,"rotation":0.0,"id":460,"width":27.272727272727273,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":103,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.636363636363626,"y":16.977241335952158,"rotation":29.999999999999996,"id":461,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":173.11627886320696,"y":217.04551732809568,"rotation":0.0,"id":462,"width":84.49629087449557,"height":52.954482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":104,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":21.954482671904316,"rotation":0.0,"id":463,"width":27.272727272727273,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":108,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.636363636363626,"y":16.977241335952158,"rotation":29.999999999999996,"id":464,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":106,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":109}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#00bfa6","strokeWidth":3,"endArrow":1,"orthoMode":0}},"textStyles":{"global":{"face":"Roboto Slab","size":"36px","color":"#676767"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536084812394,"libraries":["com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/baremetal/hostnetwork.jpg b/docs/images/baremetal/hostnetwork.jpg new file mode 100644 index 0000000000..f86dc4a621 Binary files /dev/null and b/docs/images/baremetal/hostnetwork.jpg differ diff --git a/docs/images/baremetal/metallb.gliffy b/docs/images/baremetal/metallb.gliffy new file mode 100644 index 0000000000..2ad2de0ec3 --- /dev/null +++ b/docs/images/baremetal/metallb.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":427,"nodeIndex":402,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":427}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[125.0,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
MetalLB: Layer 2
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":60,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":56,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":547.0,"y":626.5,"rotation":0.0,"id":351,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":334,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#f50056","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-160.13557569954526,-516.5],[-160.13557569954526,-482.25],[-66.802242366212,-482.25],[-66.802242366212,-398.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":607.3644243004547,"y":228.5,"rotation":0.0,"id":338,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":460.197757633788,"y":228.5,"rotation":0.0,"id":334,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockS
hape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.0310909671214,"y":228.5,"rotation":0.0,"id":337,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":167.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":7,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV179pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFu
GHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoWnkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsalX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJSXhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOH
r+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":236.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":57,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":39,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"s
trokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":236.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":19,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":392.0,"y":148.0,"rotation":0.0,"id":339,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":63,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":366,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":338,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00aeff","fillColor":"none","dashStyle":null,"startArrow":0,"endAr
row":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-177.38557569954526,62.25],[-177.38557569954526,45.5],[235.36442430045474,45.5],[235.36442430045474,80.5]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":537.0,"y":496.5,"rotation":0.0,"id":340,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":368,"py":0.0,"px":0.4999999999999999}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":337,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00aeff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-322.38557569954526,-279.75],[-322.38557569954526,-302.75],[-202.96890903287863,-302.75],[-202.96890903287863,-268.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":110.0,"y":180.0,"rotation":0.0,"id":341,"width":85.0,"height":38.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":65,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
MetalLB controller
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":346.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
master
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":291.5310909671214,"y":346.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":346.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":346.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":69,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":187.61442430045474,"y":210.25,"rotation":0.0,"id":377,"width":54.0,"height":54.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":37.63557569954526,"y":39.0,"rotation":0.0,"id":373,"width":23.0,"height":11.5,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#3f51b5","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[6.0,2.0],[-26.0,-16.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":9.0,"y":31.0,"rotation":0.0,"id":371,"width":36.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.triangle","order":78,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.triangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#3f51b5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":9.0,"y":6.5,"rotation":0.0,"id":368,"width":36.0,"height":34.0,"uid":"com.gliffy.shape.basic.basic_v1.default.triangle","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.triangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":366,"width":54.0,"height":54.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":74,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":4.0,"strokeColor":"#ffffff","fillColor":"#3f51b5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":210.0,"y":170.0,"rotation":0.0,"id":378,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
IP pool
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":488.9951905283833,"y":233.75,"rotation":29.999999999999996,"id":380,"width":100.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":83,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
203.0.113.2
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":340.0,"y":230.0,"rotation":29.999999999999996,"id":381,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
203.0.113.1
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":640.0,"y":230.0,"rotation":29.999999999999996,"id":382,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":85,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
203.0.113.3
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":236.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":54,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":49,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":599.3644243004546,"y":256.5,"rotation":0.0,"id":383,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":86,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":384,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":385,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":88,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":236.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":58,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":33,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":29,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":306.0310909671214,"y":256.5,"rotation":0.0,"id":387,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.sh
ape.basic.basic_v1.default.group","order":91,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":388,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":95,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":389,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":93,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":587.75,"y":778.75,"rotation":0.0,"id":393,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":96,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":399,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":394,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#f50056","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-57.75,-490.75],[-42.08333333333337,-490.75],[-26.41666666666663,-490.75],[-10.75,-490.75]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":565.0,"y":280.0,"rotation":90.0,"id":394,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":419.0,"y":280.0,"rotation":90.0,"id":396,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":355.0,"y":280.0,"rotation":90.0,"id":398,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":502.0,"y":280.0,"rotation":90.0,"id":399,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children
":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":597.75,"y":788.75,"rotation":0.0,"id":400,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":97,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":396,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":398,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#f50056","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-166.75,-500.75],[-182.75,-500.75],[-198.75,-500.75],[-214.75,-500.75]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.94392033916137,"y":118.0,"rotation":0.0,"id":401,"width":60.0,"height":38.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
443/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":99}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#f50056","strokeWidth":1,"orthoMode":0,"endArrow":17,"startArrow":0}},"textStyles":{"global":{"bold":false,"face":"Roboto Slab","size":"13px","color":"#676767"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536676662123,"libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/baremetal/metallb.jpg b/docs/images/baremetal/metallb.jpg new file mode 100644 index 0000000000..28dad6dea3 Binary files /dev/null and b/docs/images/baremetal/metallb.jpg differ diff --git a/docs/images/baremetal/nodeport.gliffy b/docs/images/baremetal/nodeport.gliffy new file mode 100644 index 0000000000..6823d8856e --- /dev/null +++ b/docs/images/baremetal/nodeport.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":442,"nodeIndex":434,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":442}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":438.6388145443572,"y":293.0,"rotation":90.0,"id":388,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":356.5310909671214,"y":293.0,"rotation":90.0,"id":389,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":169.36442430045474,"y":211.5,"rotation":0.0,"id":365,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWi
dth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":314.0310909671214,"y":204.5,"rotation":0.0,"id":337,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":607.3644243004547,"y":211.5,"rotation":0.0,"id":338,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":460.197757633788,"y":211.5,"rotation":0.0,"id":334,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":182.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":52,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":13,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV1
79pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFuGHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoWnkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsalX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJS
XhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOHr+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":251.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":53,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":47,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle"
:null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":251.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":54,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":37,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":31,"lockAspectRatio":false,"lockShape":fa
lse,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":251.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":29,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":537.0,"y":616.5,"rotation":0.0,"id":340,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":61,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":337,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line
":{"strokeWidth":2.0,"strokeColor":"#661fff","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-150.13557569954526,-506.5],[-150.13557569954526,-470.75],[-202.96890903287863,-470.75],[-202.96890903287863,-412.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":361.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":62,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
master
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":291.5310909671214,"y":361.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":361.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":361.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":65,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
node
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
Over a NodePort Service
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[182.0109886792553,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":120.84003405655233,"y":220.0,"rotation":0.0,"id":360,"width":584.0,"height":21.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#efe8ff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":251.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":21,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":
33,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":452.197757633788,"y":271.0,"rotation":0.0,"id":383,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":72,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":380,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
N
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":377,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":71,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":543.0,"y":1028.5,"rotation":0.0,"id":385,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":75,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":389,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":388,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00963a","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-158.46890903287863,-727.5],[-136.4330011738,-727.5],[-114.39709331472142,-727.5],[-92.36118545564278,-727.5]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":425.6025671054047,"y":321.25,"rotation":29.999999999999996,"id":390,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
80/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":602.0093065339395,"y":201.27275866404784,"rotation":0.0,"id":407,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":402,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":78,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
30100/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":403,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":80,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":456.3426398672726,"y":201.27275866404784,"rotation":0.0,"id":408,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":86,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":409,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":83,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
30100/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":410,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":85,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":309.6759732006058,"y":201.27275866404784,"rotation":0.0,"id":411,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":91,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":412,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":88,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
30100/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":413,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":90,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":162.00930653393908,"y":201.27275866404784,"rotation":0.0,"id":414,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":96,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":415,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
30100/tcp
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":416,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":95,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":62.86442430045474,"y":253.5,"rotation":0.0,"id":341,"width":80.0,"height":38.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":100,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"
NodePort
Service
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":100.84003405655234,"y":218.01650429449558,"rotation":315.0,"id":426,"width":40.0,"height":35.0,"uid":"com.gliffy.shape.android.android_v1.icons.forward","order":103,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.forward","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":109}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#00963a","strokeWidth":3,"endArrow":1,"orthoMode":0}},"textStyles":{"global":{"face":"Roboto Slab","size":"36px","color":"#676767"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536083644406,"libraries":["com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git 
a/docs/images/baremetal/nodeport.jpg b/docs/images/baremetal/nodeport.jpg new file mode 100644 index 0000000000..012f533832 Binary files /dev/null and b/docs/images/baremetal/nodeport.jpg differ diff --git a/docs/images/baremetal/user_edge.gliffy b/docs/images/baremetal/user_edge.gliffy new file mode 100644 index 0000000000..387dffaa55 --- /dev/null +++ b/docs/images/baremetal/user_edge.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":577,"nodeIndex":443,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"imageCache":{},"viewportType":"default","fitBB":{"min":{"x":36.86442430045474,"y":20},"max":{"x":736.8644243004547,"y":577}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":50.0,"y":52.0,"rotation":0.0,"id":356,"width":146.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":78,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[224.00892839349058,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":46.86442430045474,"y":29.0,"rotation":0.0,"id":354,"width":245.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Using a self-provisioned edge

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":326.86442430045474,"y":20.0,"rotation":0.0,"id":325,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.225609756097583,"y":20.0,"rotation":0.0,"id":317,"width":62.5,"height":50.0,"uid":"com.gliffy.shape.android.android_v1.icons.monitor","order":68,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.monitor","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":74.72560975609758,"y":30.0,"rotation":0.0,"id":314,"width":38.048780487804876,"height":40.0,"uid":"com.gliffy.shape.android.android_v1.icons.person","order":66,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.person","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":315,"width":120.0,"height":90.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":392.0,"y":268.0,"rotation":0.0,"id":339,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":395,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":383,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#009687","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-4.45617347599233,-1.0],[-4.45617347599233,31.75],[236.36442430045474,31.75],[236.36442430045474,74.5]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":547.0,"y":626.5,"rotation":0.0,"id":351,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":76,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":315,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#666666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-160.1355756995
4526,-516.5],[-160.13557569954526,-501.83321142036345],[-160.13557569954526,-487.1664228407269],[-160.13557569954526,-472.49963426109036]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":36.86442430045474,"y":317.0,"rotation":0.0,"id":221,"width":700.0,"height":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":10.0,"y":8.5,"rotation":0.0,"id":153,"width":42.0,"height":41.0,"uid":"com.gliffy.shape.basic.basic_v1.default.image","order":29,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Image","Image":{"url":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAApCAYAAABDV7v1AAAAAXNSR0IArs4c6QAAAAlwSFlzAAA9hAAAPYQB1ayvdAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAD9hJREFUWAmdWAl0ltWZfr7l37JvSBIIq1AETTBhCUeBgAtKW9qisdYz6tixyAxLUceOMvYYempPrYwWgbFF0PbUnqqo48zRnsG2mlhFAiSAQDuCYRMSAglZ/iz/8n3fnee93/+ziY6de873/99y73uf+77Pu9xr4P/TlDJqVtVb9XU1LmAoEVGxpG2SZXn/ZCjzFgVlAGqz65nP7llfst+fgmPqOOYxjjH8MX/L1BT4tzRlVC1qsps2TEmmR1Utaa1UlrEUSn3bCuVleM6A/mTaGXDj3QME9bKpvLU71w7bdXbMop2Bpg1VTnqR6fdf9P/lgFKDVfddCPDqFe0z4LlLqZvbrFCu7SX7oTzHoUAtk2pWhmnbZiCTgHsc6vgVuN66XeuHf5gGVCWAf0nAX0LDXwy0Tpk1qDfr6+Zw9X6rXHpyLgxvGZ++YYXzDDfeS0hKNGwaUFbA9PslPb6GQWrAI5CAFcwRDcvHN6DMtc3rit/xewI1dcqul351Bkddul0aKAFWtTZZ55t48vK2+aZSy2GY86xgNtxElEi8BMXavAhSZgJ29fhzXZ1j6pfyxG/yR82ZwfPGbvE895nd68t+z2+6aQ2XVrmXAvwZoDV179pnNUiTVy5v+xbNuMy0gjWmFSLAPhFKDSqL/1p/IkSuvwwq/Pb2TH3/nZf6MTHii9fexu9sBKy1TA1nkTlxXol6w8AzzWtK3khT4AIM/jifT6l7gJqU1YgpejvabufAJaYdqqYiCLBX5hNTCriUgcW8QJBPnQmFLELf+mQxmQDMfrgdHXGFkrCBmMSGC1UiGualbKEEuQ064Tayen1O0ccvaUWlsLCfbmcnTIOsWt42p7ezrdkMRH5jBTOrvWRMEWRSI0qZOT1Y/hVReQRWGDTQQyZ39zroifJKKhSFDLj8xnB1/hC5l3ltAqPsaNJzYh4pUS1z9nZOaJ6yoq1Gm1/Appq+qa2lGanJqu+3jvCUetUOF1zFwY6b6E8K+eggAer+Ap3I1Bbf5BCgOFAXNXr4gIsBmn8gpnCQ990EG6SWcwKG7vsZuJRJa3C4MmQumdMO51/lut5rgkUw1da+IhQ7Z0J5UC5uskO5BU6sk0ZT4sWBOH22nU9iXkEq2pOWSRcapEnfa/WwtdPDyCwDDy0Io6jARlG+hYe+HkZZhoH3Ozy8d9JDgoaWMdJEhsgSmSe4qJjEBc4lczqxM3E7lFdAQ90kfQ/lj9HK1EM3T1qlsFleo0r/GmwKpsPeNuGOIc/e6VKozjZABerVCYDxBLf+GxHMKCeokiDycizYoma2nywp0jQ42pbEB7sH8eSf49hDml9bYKboAGyLKtxYaOBMnN7pkbAGTK7B0iRXagrFbBjTVeU18YYLYSgmLDF/S3HbNnrjFCfen8wOqMB7Jzz12r1ZxvxZ2XjutW4sf3UAVcMtNJ1myKsJ4c752RhTFhJcZ5s4krSLnAcHj8Tw/JtR/HRrAlVFJpqOufjFHRm455t5eKshioUbo5g5zEKfA8cKZEoC2dlcWDLd52qdadfeBpPKdFuGHB9Nfo8nT4TjlrglQjBOdroIh0wsu6MAoSBw36uDeOnuTNx6Yy6slPZa2xPYsW+QiQf41vU5MhL/8Yde2FTR1CsjKB4SwLhRYfxocQhXlHXj7pcHsOmuTHx3Yb7ue/w0vZBWk0XSkuJh8n5cVeep0dRmSw1mm/ah/CbhgMtZyxkrczwnLq5mDDgK02mmJVsG0XyoHSvvzMOiWwtw8zUJDBsagElKdNHN32zow6aGQTQ0Onj422EsvME3feP/JPDE5hjmTuvHousiuOnaLORm27jja3mYOz0Tw4uDWsuPv9iDX3/ioDrfxIAEP9pXeUkYVjCXgCv4piXamm2Y2aVV2lgkwFTDphkZUTTT2V/S4Iw8E5sOOhi7sgNHjsc1FwVktN/Fyl+cwV1PRhESvyTny4ZoB+UDUFbEe74TCtz+4yhWbTyDfnqfcFhAHjoWx/h/7cSvWxxcQ5BxASmRTBDAcEw7LBoWnkJ4yixk6DxuCHmVNrj0l84SIrWHTslmAJER57WMiInFC7IQYwj6FSeTVBAWT0u1kNzz9Z9Oefje14L4e/I5Qgqd38rz6ZxctMTaVEsL0KGXi9TOvXmz4eqR5YtPXsa
lX6FcyYx6RTJODxJt7WR4uf/GCEYNDyFJYMdPJmBxgoorMvD08kJs+GoE+NhDkPEy3YISTw572LggA6uXFaH8KxFNl+NtCSRJqzEjQrj/+gh2ciG2cE0PTSOml7CM4KuJGhtFaaBBOzmRShzmkRsSA2QyGSKBvJsxtDDPwLQrw/IaR07EMfGHHfhwV79+zsuxce8t+di6sQDXVhJwqs2qykDjswX4B37LkdzK9n5THyoe68BRypBWXR5BDkNeHxef8kv9nj8mfYVYjOEsxifISw3UNa1KKRJoele8TsMkUgH6EevgW8dYGEZeSduxP4Zom4d5/96DP25lBcXGqIsZkzMxati5UCXan1aeqb/Lz9sfMPys78UZWqfpLzH9vqw4gNsoezfnEK2mjMh/8WflSB1gmKiUL/5nT02VN2zMEQJUQmvKHEyN44fZyAibcGiyfYeTGFtmYjq1fMPjPXh1Sw+HMd+nUlbbqQTaTvsbAKZjkYnf/b4b837Sg7kFBsYOM7UMl8TMzLAwoZQcYXZiJNPhSWSlANNhGLKgpokMu2pRawbNXS4hwe/gC5ePurF7Ua5eBHO4h70nXLQc89BSbGIEAZ/qcsk5ap8zHTgcw91rurQDvnh/vo6dwsfuqIcrR5s6u4HW2JPvMgJ4mhJFjCq6+CPQixp3MMJTo0Iw2iriXg7PGqvICeI3xe/FJ6SYCAtxRE4qGIjWLh9iYs0dEUyZGMboYUEMLUolcI5rPeVg2w6iZrxu+zuHQJkPOf6+2nwsvM4hvxPY+dc4jrSzrEtZQCud08hcrq2YSg1Jp2JRzVOKHSsYbaWsCiuQFfKSfS4HmcIVqS1belKaPer55TtHZGVa+PHiQm0yPn6mTa/IwCs/dMVcmHpVhv4uMVfa0KKAvqZXZLLCOhchxBI44uFt6cQpx5BSUjI6JKHJstoIZIbc5MBk2/SMqaYVAC3vEqQVpakm0NSPXB9CKQN4yRAbV47zvVm0I7yS1seA39vv4VRnElkZJi4fGUaEPK69KVd/lx8JxAePxNE34GFooY1sen92pgmJwSn64vabc1E+LoSTHQ5OMJW+vj2OA91K5bCwpOaUadq0tDfNVoY3RfjpMRbksNDaQQ49Mi+Cuxb4ebizy0HLp3GMLA3qnH+I95v+qxcdveTpGQ9/2uvioZuDeGK57/ESAdJNwDzHvqvfjGN2uY2xTMmFTB7fW5Ct+TtIzgsdhD5XMc5Ks8wu3PXiAGrKLCXFN0t/vjUrSTBzJPctNK8ypb68jOR+f38CuVk9+Piog4074jj4iYf6x/Mwe2oWCnJZPR1zsOWQizkjqV2aat60iA5RMtG+A4NaWzKxmH1+NQP+tqSmz/N7HcwZbuJfcn1e79w3gFmPdqNwhIkHpwcxviyA+r0JlOQbxiAtK6FSYjvRjhS9fsq8Wuq6CYfFrTUqYqr/POIaz+3iJo5KmkotoNTA6w0DmF6ewZrTxhP35GLLz7pxfEDhm2MtVEzwtRHtc7FsQ7fwC2+tCjCY2ygfz0JlbB/2U/sibzXHFubbiHOyN/7MAFrMkMVN4MoGJoFYHMVMqyPCpkqw/KRxGOLDluslP+UO2HtUb30NUzKxm2SHUSy5ZnLl17BuFF+oKTTxzM4kGnb42ahiQgb+cG82Dn7oYsGUECt6X0PvNPahnrXq+7ze3e73FVBfZ5+PP3Dx1nezUTnJd7IPmvvx1PYkZlERMsc1nEPmHMG5BYNgMVhCCTYy9VFNqMplrSu4uXraSUQZHHRjP7+JQwQoSZxMBL72YAG+MtpPpw3b+3TJd/nIEE4w/9/y0070s584sk11vP5Ioa62pGg+xkr/uhnZWmgLK6fbVrOaYt88erhUTiL7XNMHEYrZ0vLi0RVN60rXaEDNa0t/7ib7XuBeRZ79gstPEZp7Sca8AgpklYcHftkNcShps6dl0dt9J9qxfxCNJzzQN/AJM+TONhfb9w7qfhIR0iCPtSbw8HNdaCZthnCXGmeGOgeSzqPnVa4VzrV4CvOCgJSXhmxBpIyqWsTNVaj1PZ4jVfOsiNMZktxlpF6rxOesANDIfU8l90pr7snRsTIdJ6U+7ex2EO3j5oeDchiGCvLsswWJBPjdfx3EP7/Qg3e5/5qZSyvRT86BPDtXwgrlBIlhW05h6UwpQwWjBqGPUnhCN3lF2yju6BrNQPgyVi8UowjtLFadGnMJfxfBynHE6Z8N4a4zoB0jJFvKS7QE00yQ1Y0sYtwPTqGbPjWLILupivMqptQkRpIHHgHXibUTWvXun5ccSWPT0butaYMnGt21PvtM6fQHP1LKvZM2l92gnGWeRSAZRwrdg8xaj04LYv7MLNTTgVb/rgcD3JXJ4UMXAbV3MvYeS2D7R4N49o0oItyBXTEmjEGWdw2HXYzMNLh9Zuw5F3NFYZzLZKbkrsj0FjavKW1Og5T1X0BhOcoRVV+95PgDdjj333znki6K21hxEn+CDmpj62NFuIzZZtYP2rGDhYreR1Nb8KtBne91bUa7TGYIkqMe0ercH3VAwksG1SncT4EVZsEOZptuPPpA87rSpy8+fzqrLfZDPVbJVOAZ5lM8DHs+EOZmxlCSGnQQlwOEPacUVs4JYwQz1f6DMexghX4Da8qaEoaXUhPVDDdyyf1svruRVdPu0ywPmQhkr/T9WWHs61QsQkQqQ7rf3EA4z5Q5BSRnM+rRoLGkvus1p+95SFbHIxR9SgcjXrzYiXc3WoFs0VFCVh6XspqOdLDVoVn78fI7jJUM1n3UsBzp0Pq68pHqR+7FJeUCT0xeYt/GPf04cIIftOkNRnSxqEqwQA44sZ5t8cIz/yhgamuJi1jkPt0uMH36ZZob5UuPjLaNYCMz1xBxLpo/EOBhxdGYZ7R3M7zQKcYQRIKWP0e3tBT/X/K97BSOsjhup7f7mUeCuvDOdx6eJZz2LGPa+c5zoZSLOHr+x0m1+4P7N09KVC5tnUcU/y1IuOOSMwZLwIYsZYjmJFh/Hsi0PAErm0QJDDFaJZ15mH/08Q3r4HlNa0vfFodu2mBIcv9Mu4Cj538VkEIDcmYLCfqA7KmkRiBc7kiUIablAdr/CVJkykKkL08jIWP5KCWHxzqYbFQPCkhx5M8DKTI+F6h83DxJB1EIwemNzwcyhwaoBEnsOtVekjcy8BJN+qb6eyJDZHnJ3k27nil9SrqnHfkSQ/WrLwSKOjmfPOdcyf72dcp1ZDcnJxLisXJ9+eaP4V7I7RZZiJWknEfOZy90nouF/i9FYjxzfww9WAAAAABJRU5ErkJggg==","urlHash":null,"fileName":null,"imageId":null,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":false,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":0.0,"y":0.0,"rotation":0.0,"id":3,"width":700.0,"heigh
t":260.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#f5f5f5","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":142.86442430045474,"y":386.5,"rotation":0.0,"id":235,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":64,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":181,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":182,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":184,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":185,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":186,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":144.36442430045474,"y":496.5,"rotation":0.0,"id":346,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPadding
Right":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

master

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":584.8644243004547,"y":496.5,"rotation":0.0,"id":350,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

node

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":63.86442430045474,"y":388.5,"rotation":0.0,"id":368,"width":80.0,"height":38.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

NodePort

\n

Service

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":367.04382652400767,"y":154.0,"rotation":0.0,"id":353,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":431.04382652400767,"y":199.5,"rotation":0.0,"id":341,"width":116.0,"height":38.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

User-managed

public edge

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":101.84003405655233,"y":353.0165042944956,"rotation":315.0,"id":367,"width":40.0,"height":35.0,"uid":"com.gliffy.shape.android.android_v1.icons.forward","order":102,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.forward","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":391.78489778522317,"y":149.5,"rotation":29.999999999999996,"id":389,"width":68.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":103,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

80/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":367.54382652400767,"y":251.0,"rotation":0.0,"id":395,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":402.0,"y":278.0,"rotation":0.0,"id":396,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":104,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":395,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":382,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#009687","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-14.45617347599233,-11.0],[-14.45617347599233,21.75],[79.197757633788,21.75],[79.197757633788,64.5]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":1043.0,"y":375.5,"rotation":0.0,"id":397,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":105,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":395,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":384,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#009687","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-655.4561734759923,-108.5],[-655.4561734759923,-75.75],[-707.9689090328786,-75.75],[-707.9689090328786,-33.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":1088.0,"y":394.5,"rotation":0.0,"id":398,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":106,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":395,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":385,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#009687","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-700.4561734759923,-127.5],[-700.4561734759923,-94.75],[-897.6355756995453,-94.75],[-897.6355756995453,-52.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":337.04382652400767,"y":167.0,"rotation":90.0,"id":363,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":79,"lockAspectRatio":false,"lo
ckShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#009687","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":360.68668366686484,"y":187.50000000000003,"rotation":90.0,"id":402,"width":53.7142857142857,"height":47.0,"uid":"com.gliffy.shape.android.android_v1.icons.forward","order":107,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.forward","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":439.197757633788,"y":496.5,"rotation":0.0,"id":349,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

node

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":582.8644243004547,"y":386.5,"rotation":0.0,"id":229,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":199,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":200,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":57,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":202,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":203,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":204,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":436.197757633788,"y":386.5,"rotation":0.0,"id":231,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":190,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":49,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcq
BzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":191,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":193,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":45,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":194,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":195,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":571.8644243004546,"y":455.97724133595216,"rotation":29.999999999999996,"id":406,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":114,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

80/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":170.36442430045474,"y":342.5,"rotation":0.0,"id":385,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":315.0310909671214,"y":342.5,"rotation":0.0,"id":384,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":608.3644243004547,"y":342.5,"rotation":0.0,"id":383,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":461.197757633788,"y":342.5,"rotation":0.0,"id":382,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":121.84003405655233,"y":355.0,"rotation":0.0,"id":381,"width":584.0,"height":21.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#e5f8fb","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":603.0093065339395,"y":336.27275866404784,"rotation":0.0,"id":378,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":85,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":379,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

30100/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":380,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":84,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":457.3426398672726,"y":336.27275866404784,"rotation":0.0,"id":375,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":90,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":376,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":87,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

30100/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":377,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":89,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":310.6759732006058,"y":336.27275866404784,"rotation":0.0,"id":372,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":95,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":373,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":92,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

30100/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":374,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":94,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":163.00930653393908,"y":336.27275866404784,"rotation":0.0,"id":369,"width":85.71023553303064,"height":60.454482671904316,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":100,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-1.1448822334847364,"y":20.727241335952158,"rotation":29.999999999999996,"id":370,"width":88.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":97,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

30100/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":11.243144374054054,"y":37.22724133595216,"rotation":0.0,"id":371,"width":27.272727272727277,"height":15.0,"uid":"com.gliffy.shape.android.android_v1.icons.expand","order":99,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.android.android_v1.icons.expand","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#676767","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":545.3333333333333,"y":1163.3863793320238,"rotation":0.0,"id":407,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":113,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":435,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":413,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00b8d4","fillColor":"none","dashStyle":null,"startArrow":17,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-310.9724875712815,-727.4999999999999],[-293.4353868660686,-727.4999999999999],[-275.89828616085566,-727.4999999999999],[-258.3611854556427,-727.4999999999999]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":289.5310909671214,"y":386.5,"rotation":0.0,"id":233,"width":88.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":63,"lockAspectRatio":false,"lockShape":false,"children":[{"x":61.5,"y":56.0,"rotation":0.0,"id":161,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":39,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":52.5,"rotation":0.0,"id":162,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":61.5,"y":43.0,"rotation":0.0,"id":164,"width":3.0,"height":3.0,"uid":"com.gliffy.shape.basic.basic_v1.default.circle","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":19.0,"y":39.5,"rotation":0.0,"id":165,"width":50.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":33,"lockAspectRat
io":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-6.0,"y":6.0,"rotation":90.0,"id":166,"width":100.0,"height":88.0,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#3a5de9","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":274.97214787769053,"y":427.8863793320239,"rotation":90.0,"id":413,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":292.5310909671214,"y":496.5,"rotation":0.0,"id":348,"width":85.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

node

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":307.5310909671215,"y":405.8863793320239,"rotation":0.0,"id":408,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":110,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":409,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":112,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":410,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":109,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":353.5310909671214,"y":428.5,"rotation":90.0,"id":431,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":419.197757633788,"y":428.5,"rotation":90.0,"id":432,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":206.36442430045474,"y":428.5,"rotation":90.0,"id":435,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":555.3333333333333,"y":1173.3863793320238,"rotation":0.0,"id":436,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":115,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":431,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":432,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00b8d4","fillColor":"none","dashStyle":null,"startArrow":17,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-173.80224236621189,-736.8863793320238],[-157.24668681065634,-736.8863793320238],[-140.6911312551008,-736.8863793320238],[-124.13557569954526,-736.8863793320238]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":278.5310909671214,"y":455.97724133595216,"rotation":29.999999999999996,"id":437,"width":73.0,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":116,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"pad
dingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

80/tcp

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":598.8644243004547,"y":405.47724133595216,"rotation":0.0,"id":438,"width":53.00000000000006,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":117,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":5.5,"rotation":0.0,"id":439,"width":53.0,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":121,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":-3.599999999999966,"y":3.6000000000000227,"rotation":90.0,"id":440,"width":60.0,"height":52.8,"uid":"com.gliffy.shape.basic.basic_v1.default.hexagon","order":119,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.hexagon.basic_v1","strokeWidth":0.0,"strokeColor":"#000000","fillColor":"#00963a","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":565.3333333333333,"y":1183.3863793320238,"rotation":0.0,"id":441,"width":46.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":122,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":433,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":434,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#00b8d4","fillColor":"none","dashStyle":null,"startArrow":17,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-37.13557569954526,-746.8863793320238],[-20.246686810656342,-746.8863793320238],[-3.3577979217674283,-746.8863793320238],[13.531090967121486,-746.8863793320238]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":500.197757633788,"y":428.5,"rotation":90.0,"id":433,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"},{"x":566.8644243004547,"y":428.5,"rotation":90.0,"id":434,"width":40.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":0.0,"strokeColor":"#333333","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"mcqBzoFMGdMI"}],"layers":[{"guid":"mcqBzoFMGdMI","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":123}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#00b8d4","strokeWidth":2,"orthoMode":0,"startArrow":17}},"textStyles":{"global":{"bold":true,"face":"Roboto 
Slab","size":"16px","color":"#00b8d4"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","autosaveDisabled":false,"lastSerialized":1536087239311,"libraries":["com.gliffy.libraries.android.android_v1.icons","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/images/baremetal/user_edge.jpg b/docs/images/baremetal/user_edge.jpg new file mode 100644 index 0000000000..f1165f5cc8 Binary files /dev/null and b/docs/images/baremetal/user_edge.jpg differ diff --git a/docs/images/grafana.png b/docs/images/grafana.png new file mode 100644 index 0000000000..662a9878a7 Binary files /dev/null and b/docs/images/grafana.png differ diff --git a/docs/images/jaeger-demo.png b/docs/images/jaeger-demo.png new file mode 100644 index 0000000000..3a3e315365 Binary files /dev/null and b/docs/images/jaeger-demo.png differ diff --git a/docs/images/prometheus-dashboard.png b/docs/images/prometheus-dashboard.png new file mode 100644 index 0000000000..5de60fafd2 Binary files /dev/null and b/docs/images/prometheus-dashboard.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000000..2be8c2c9dc --- /dev/null +++ b/docs/index.md @@ -0,0 +1,11 @@ +# Welcome + +This is the documentation for the NGINX Ingress Controller. + +It is built around the [Kubernetes Ingress resource](http://kubernetes.io/docs/user-guide/ingress/), using a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#understanding-configmaps-and-pods) to store the NGINX configuration. + +Learn more about using Ingress on [k8s.io](http://kubernetes.io/docs/user-guide/ingress/). + +## Getting Started + +See [Deployment](./deploy) for a whirlwind tour that will get you started. diff --git a/docs/kubectl-plugin.md b/docs/kubectl-plugin.md new file mode 100644 index 0000000000..7c6c67b1df --- /dev/null +++ b/docs/kubectl-plugin.md @@ -0,0 +1,357 @@ + + +# The ingress-nginx kubectl plugin + +## Installation + +Install [krew](https://github.com/GoogleContainerTools/krew), then run +```console +$ kubectl krew install ingress-nginx +``` +to install the plugin. 
Then run
+```console
+$ kubectl ingress-nginx --help
+```
+to make sure the plugin is properly installed and to get a list of commands:
+
+```console
+$ kubectl ingress-nginx --help
+A kubectl plugin for inspecting your ingress-nginx deployments
+
+Usage:
+  ingress-nginx [command]
+
+Available Commands:
+  backends    Inspect the dynamic backend information of an ingress-nginx instance
+  certs       Output the certificate data stored in an ingress-nginx pod
+  conf        Inspect the generated nginx.conf
+  exec        Execute a command inside an ingress-nginx pod
+  general     Inspect the other dynamic ingress-nginx information
+  help        Help about any command
+  info        Show information about the ingress-nginx service
+  ingresses   Provide a short summary of all of the ingress definitions
+  lint        Inspect kubernetes resources for possible issues
+  logs        Get the kubernetes logs for an ingress-nginx pod
+  ssh         ssh into a running ingress-nginx pod
+
+Flags:
+      --as string                      Username to impersonate for the operation
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --cache-dir string               Default HTTP cache directory (default "/Users/alexkursell/.kube/http-cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+  -h, --help                           help for ingress-nginx
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+
+Use "ingress-nginx [command] --help" for more information about a command.
+```
+
+If a new `ingress-nginx` version has just been released, the plugin may not yet have been updated in the krew index. In that case, you can install the latest version of the plugin by running:
+
+```console
+(
+  set -x; cd "$(mktemp -d)" &&
+  curl -fsSLO "https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}" &&
+  kubectl krew install \
+    --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz
+)
+```
+
+Replace `0.24.0` with the most recently released version.
+
+## Common Flags
+
+ - Every subcommand supports the basic `kubectl` configuration flags like `--namespace`, `--context`, `--client-key`, and so on.
+ - Subcommands that act on a particular `ingress-nginx` pod (`backends`, `certs`, `conf`, `exec`, `general`, `logs`, `ssh`) support the `--deployment <deployment>` and `--pod <pod>` flags to select either a pod from a deployment with the given name, or a pod with the given name. The `--deployment` flag defaults to `nginx-ingress-controller`.
+ - Subcommands that inspect resources (`ingresses`, `lint`) support the `--all-namespaces` flag, which causes them to inspect resources in every namespace. + +## Subcommands + +Note that `backends`, `general`, `certs`, and `conf` require `ingress-nginx` version `0.23.0` or higher. + +### backends + +Run `kubectl ingress-nginx backends` to get a JSON array of the backends that an ingress-nginx controller currently knows about: + +```console +$ kubectl ingress-nginx backends -n ingress-nginx +[ + { + "name": "default-apple-service-5678", + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 5678, + "targetPort": 5678 + } + ], + "selector": { + "app": "apple" + }, + "clusterIP": "10.97.230.121", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "secureCACert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [ + { + "address": "10.1.3.86", + "port": "5678" + } + ], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "" + } + }, + "upstreamHashByConfig": { + "upstream-hash-by-subset-size": 3 + }, + "noServer": false, + "trafficShapingPolicy": { + "weight": 0, + "header": "", + "headerValue": "", + "cookie": "" + } + }, + { + "name": "default-echo-service-8080", + ... + }, + { + "name": "upstream-default-backend", + ... + } +] +``` + +Add the `--list` option to show only the backend names. Add the `--backend <backend name>` option to show only the backend with the given name. + +### certs + +Use `kubectl ingress-nginx certs --host <hostname>` to dump the SSL cert/key information for a given host. Requires that `--enable-dynamic-certificates` is `true` (this is the default as of version `0.24.0`). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. + +```console +$ kubectl ingress-nginx certs --host testaddr.local -n ingress-nginx +-----BEGIN CERTIFICATE----- +... +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +... +-----END CERTIFICATE----- + +-----BEGIN RSA PRIVATE KEY----- + +-----END RSA PRIVATE KEY----- +``` + +### conf + +Use `kubectl ingress-nginx conf` to dump the generated `nginx.conf` file. Add the `--host <hostname>` option to view only the server block for that host: + +```console +$ kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local + + server { + server_name testaddr.local ; + + listen 80; + + set $proxy_upstream_name "-"; + set $pass_access_scheme $scheme; + set $pass_server_port $server_port; + set $best_http_host $http_host; + set $pass_port $pass_server_port; + + location / { + + set $namespace ""; + set $ingress_name ""; + set $service_name ""; + set $service_port "0"; + set $location_path "/"; + +... +``` + +### exec + +`kubectl ingress-nginx exec` is exactly the same as `kubectl exec`, with the same command flags. It will automatically choose an `ingress-nginx` pod to run the command in.
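+Because `exec` is one of the pod-scoped subcommands, the `--pod` flag described under Common Flags should work here as well when you want a specific replica rather than an arbitrary one; a minimal sketch, with a purely illustrative pod name: + +```console +# run a one-off command in an explicitly chosen controller pod +$ kubectl ingress-nginx exec -n ingress-nginx --pod nginx-ingress-controller-7cbf77c976-wx5pn -- nginx -v +```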
+ +```console +$ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx +fastcgi.conf +fastcgi.conf.default +fastcgi_params +fastcgi_params.default +geoip +koi-utf +koi-win +lua +mime.types +mime.types.default +modsecurity +modules +nginx.conf +nginx.conf.default +opentracing.json +owasp-modsecurity-crs +scgi_params +scgi_params.default +template +uwsgi_params +uwsgi_params.default +win-utf +``` + +### general + +`kubectl ingress-nginx general` dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. + +```console +$ kubectl ingress-nginx general +{ + "controllerPodsCount": 1 +} +``` + +### info + +Shows the internal and external IP/CNAMES for an `ingress-nginx` service. + +```console +$ kubectl ingress-nginx info -n ingress-nginx +Service cluster IP address: 10.187.253.31 +LoadBalancer IP|CNAME: 35.123.123.123 +``` + +Use the `--service <service>` flag if your `ingress-nginx` `LoadBalancer` service is not named `ingress-nginx`. + +### ingresses + +`kubectl ingress-nginx ingresses`, alternatively `kubectl ingress-nginx ing`, shows a more detailed view of the ingress definitions in a namespace. Compare: + +```console +$ kubectl get ingresses --all-namespaces +NAMESPACE NAME HOSTS ADDRESS PORTS AGE +default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d +default test-ingress-2 * localhost 80 5d +``` + +vs + +```console +$ kubectl ingress-nginx ingresses --all-namespaces +NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS +default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 +default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 +default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 +default test-ingress-2 * localhost NO echo-service 8080 2 +``` + +### lint + +`kubectl ingress-nginx lint` can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between `ingress-nginx` versions. + +```console +$ kubectl ingress-nginx lint --all-namespaces --verbose +Checking ingresses... +✗ anamespace/this-nginx + - Contains the removed session-cookie-hash annotation. + Lint added for version 0.24.0 + https://github.com/kubernetes/ingress-nginx/issues/3743 +✗ othernamespace/ingress-definition-blah + - The rewrite-target annotation value does not reference a capture group + Lint added for version 0.22.0 + https://github.com/kubernetes/ingress-nginx/issues/3174 + +Checking deployments... +✗ namespace2/nginx-ingress-controller + - Uses removed config flag --sort-backends + Lint added for version 0.22.0 + https://github.com/kubernetes/ingress-nginx/issues/3655 + - Uses removed config flag --enable-dynamic-certificates + Lint added for version 0.24.0 + https://github.com/kubernetes/ingress-nginx/issues/3808 +``` + +To show the lints added **only** for a particular `ingress-nginx` release, use the `--from-version` and `--to-version` flags: + +```console +$ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 +Checking ingresses... +✗ anamespace/this-nginx + - Contains the removed session-cookie-hash annotation. + Lint added for version 0.24.0 + https://github.com/kubernetes/ingress-nginx/issues/3743 + +Checking deployments...
+✗ namespace2/nginx-ingress-controller + - Uses removed config flag --enable-dynamic-certificates + Lint added for version 0.24.0 + https://github.com/kubernetes/ingress-nginx/issues/3808 +``` + +### logs + +`kubectl ingress-nginx logs` is almost the same as `kubectl logs`, with fewer flags. It will automatically choose an `ingress-nginx` pod to read logs from. + +```console +$ kubectl ingress-nginx logs -n ingress-nginx +------------------------------------------------------------------------------- +NGINX Ingress controller + Release: dev + Build: git-48dc3a867 + Repository: git@github.com:kubernetes/ingress-nginx.git +------------------------------------------------------------------------------- + +W0405 16:53:46.061589 7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false) +nginx version: nginx/1.15.9 +W0405 16:53:46.070093 7 client_config.go:549] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. +I0405 16:53:46.070499 7 main.go:205] Creating API client for https://10.96.0.1:443 +I0405 16:53:46.077784 7 main.go:249] Running in Kubernetes cluster version v1.10 (v1.10.11) - git (clean) commit 637c7e288581ee40ab4ca210618a89a555b6e7e9 - platform linux/amd64 +I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller +I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"82258915-563e-11e9-9c52-025000000001", APIVersion:"v1", ResourceVersion:"494", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services +... +``` + +### ssh + +`kubectl ingress-nginx ssh` is exactly the same as `kubectl ingress-nginx exec -it -- /bin/bash`. Use it when you want to quickly be dropped into a shell inside a running `ingress-nginx` container. + +```console +$ kubectl ingress-nginx ssh -n ingress-nginx +www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$ +``` diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 1b40a7621f..c91ce2f709 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,46 +1,123 @@ -# Debug & Troubleshooting +# Troubleshooting -## Debug +## Ingress-Controller Logs and Events -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: +There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting +methods to obtain more information. 
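+As a quick first pass, it can also help to stream cluster events while reproducing the problem; this is plain `kubectl`, not specific to ingress-nginx: + +```console +# stream events from all namespaces; stop with Ctrl+C +$ kubectl get events --all-namespaces --watch +```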
-- `--v=2` shows details using `diff` about the changes in the configuration in nginx +Check the Ingress Resource Events ```console -I0316 12:24:37.581267 1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf -I0316 12:24:37.581356 1 utils.go:149] --- /tmp/922554809 2016-03-16 12:24:37.000000000 +0000 -+++ /tmp/079811012 2016-03-16 12:24:37.000000000 +0000 -@@ -235,7 +235,6 @@ +$ kubectl get ing -n <namespace-of-ingress-resource> +NAME HOSTS ADDRESS PORTS AGE +cafe-ingress cafe.com 10.0.2.15 80 25s + +$ kubectl describe ing <ingress-resource-name> -n <namespace-of-ingress-resource> +Name: cafe-ingress +Namespace: default +Address: 10.0.2.15 +Default backend: default-http-backend:80 (172.17.0.5:8080) +Rules: + Host Path Backends + ---- ---- -------- + cafe.com + /tea tea-svc:80 () + /coffee coffee-svc:80 () +Annotations: + kubectl.kubernetes.io/last-applied-configuration: {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"cafe-ingress","namespace":"default","selfLink":"/apis/networking/v1beta1/namespaces/default/ingresses/cafe-ingress"},"spec":{"rules":[{"host":"cafe.com","http":{"paths":[{"backend":{"serviceName":"tea-svc","servicePort":80},"path":"/tea"},{"backend":{"serviceName":"coffee-svc","servicePort":80},"path":"/coffee"}]}}]},"status":{"loadBalancer":{"ingress":[{"ip":"169.48.142.110"}]}}} + +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress + Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress +``` - upstream default-http-svcx { - least_conn; -- server 10.2.112.124:5000; - server 10.2.208.50:5000; +Check the Ingress Controller Logs - } -I0316 12:24:37.610073 1 command.go:69] change in configuration detected. Reloading... +```console +$ kubectl get pods -n <namespace-of-ingress-controller> +NAME READY STATUS RESTARTS AGE +nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m + +$ kubectl logs -n <namespace-of-ingress-controller> nginx-ingress-controller-67956bf89d-fv58j +------------------------------------------------------------------------------- +NGINX Ingress controller + Release: 0.14.0 + Build: git-734361d + Repository: https://github.com/kubernetes/ingress-nginx +------------------------------------------------------------------------------- +.... ``` -- `--v=3` shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format -- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html) +Check the Nginx Configuration + +```console +$ kubectl get pods -n <namespace-of-ingress-controller> +NAME READY STATUS RESTARTS AGE +nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m + +$ kubectl exec -it -n <namespace-of-ingress-controller> nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf +daemon off; +worker_processes 2; +pid /run/nginx.pid; +worker_rlimit_nofile 523264; +worker_shutdown_timeout 10s; +events { + multi_accept on; + worker_connections 16384; + use epoll; +} +http { +.... +``` + +Check if the Services used by the Ingress exist + +```console +$ kubectl get svc --all-namespaces +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m +default kubernetes ClusterIP 10.96.0.1 443/TCP 30m +default tea-svc ClusterIP 10.104.172.12 80/TCP 18m +kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m +kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m +kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m +``` + +## Debug Logging -## Troubleshooting +Using the flag `--v=XX` it is possible to increase the level of logging.
This is performed by editing +the deployment. + +```console +$ kubectl get deploy -n <namespace-of-ingress-controller> +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +default-http-backend 1 1 1 1 35m +nginx-ingress-controller 1 1 1 1 35m +$ kubectl edit deploy -n <namespace-of-ingress-controller> nginx-ingress-controller +# Add --v=X to "- args", where X is an integer +``` -### Authentication to the Kubernetes API Server +- `--v=2` shows details using `diff` about the changes in the configuration in nginx +- `--v=3` shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format +- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html) +## Authentication to the Kubernetes API Server A number of components are involved in the authentication process and the first step is to narrow -down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. +down the source of the problem, namely whether it is a problem with service authentication or +with the kubeconfig file. + Both authentications must work: @@ -49,25 +126,24 @@ Both authentications must work: + apiserver +<-------------------+ ingress | | | | controller | +-------------+ +------------+ - ``` -__Service authentication__ +**Service authentication** The Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways: 1. _Service Account:_ This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. 2. _Kubeconfig file:_ In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the `--kubeconfig` flag. The value of the flag is a path to a file specifying how to connect to the API server. Using the `--kubeconfig` does not require the flag `--apiserver-host`. -The format of the file is identical to `~/.kube/config` which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. + The format of the file is identical to `~/.kube/config` which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. -3. _Using the flag `--apiserver-host`:_ Using this flag `--apiserver-host=http://localhost:8080` it is possible to specify an unsecured api server or reach a remote kubernetes cluster using [kubectl proxy](https://kubernetes.io/docs/user-guide/kubectl/kubectl_proxy/). -Please do not use this approach in production. +3. _Using the flag `--apiserver-host`:_ Using this flag `--apiserver-host=http://localhost:8080` it is possible to specify an unsecured API server or reach a remote kubernetes cluster using [kubectl proxy](https://kubernetes.io/docs/user-guide/kubectl/kubectl_proxy/). + Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left hand side. -``` +``` Kubernetes Workstation +---------------------------------------------------+ +------------------+ | | | | @@ -88,13 +164,14 @@ Kubernetes Workstation ``` ### Service Account + If using a service account to connect to the API server, the ingress-controller expects the file `/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. It provides a secret token that is required to authenticate with the API server.
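+Before running the full verification below, a quick sanity check is to list that mount directly; the contents shown are the standard service-account files, and the pod name is illustrative: + +```console +$ kubectl exec -n <namespace-of-ingress-controller> nginx-ingress-controller-67956bf89d-fv58j -- ls /var/run/secrets/kubernetes.io/serviceaccount +ca.crt +namespace +token +```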
Verify with the following commands: -```shell +```console # start a container that contains curl $ kubectl run test --image=tutum/curl -- sleep 10000 @@ -141,8 +218,8 @@ $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes "/apis/batch/v2alpha1", "/apis/certificates.k8s.io", "/apis/certificates.k8s.io/v1alpha1", - "/apis/extensions", - "/apis/extensions/v1beta1", + "/apis/networking", + "/apis/networking/v1beta1", "/apis/policy", "/apis/policy/v1alpha1", "/apis/rbac.authorization.k8s.io", @@ -163,21 +240,100 @@ $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes If it is not working, there are two possible reasons: 1. The contents of the tokens are invalid. Find the secret name with `kubectl get secrets | grep service-account` and -delete it with `kubectl delete secret <name>`. It will automatically be recreated. + delete it with `kubectl delete secret <name>`. It will automatically be recreated. 2. You have a non-standard Kubernetes installation and the file containing the token may not be present. -The API server will mount a volume containing this file, but only if the API server is configured to use -the ServiceAccount admission controller. -If you experience this error, verify that your API server is using the ServiceAccount admission controller. -If you are configuring the API server by hand, you can set this with the `--admission-control` parameter. -Please note that you should use other admission controllers as well. Before configuring this option, you should -read about admission controllers. + The API server will mount a volume containing this file, but only if the API server is configured to use + the ServiceAccount admission controller. + If you experience this error, verify that your API server is using the ServiceAccount admission controller. + If you are configuring the API server by hand, you can set this with the `--admission-control` parameter. + > Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: -* [User Guide: Service Accounts](http://kubernetes.io/docs/user-guide/service-accounts/) -* [Cluster Administrator Guide: Managing Service Accounts](http://kubernetes.io/docs/admin/service-accounts-admin/) +- [User Guide: Service Accounts](http://kubernetes.io/docs/user-guide/service-accounts/) +- [Cluster Administrator Guide: Managing Service Accounts](http://kubernetes.io/docs/admin/service-accounts-admin/) + +## Kube-Config + +If you want to use a kubeconfig file for authentication, follow the [deploy procedure](deploy/index.md) and +add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the args section of the deployment. + +## Using GDB with Nginx + +[Gdb](https://www.gnu.org/software/gdb/) can be used with nginx to perform a configuration +dump. This allows us to see which configuration is being used, as well as older configurations. + +Note: The below is based on the nginx [documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/debugging/#dumping-nginx-configuration-from-a-running-process). + +1. SSH into the worker -### Kubeconfig -If you want to use a kubeconfig file for authentication, follow the deploy procedure and -add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the deployment +```console +$ ssh user@workerIP +``` + +2.
Obtain the Docker Container Running nginx + +```console +$ docker ps | grep nginx-ingress-controller +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller "/usr/bin/dumb-init …" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 +``` + +3. Exec into the container + +```console +$ docker exec -it --user=0 --privileged d9e1d243156a bash +``` + +4. Make sure nginx was built with `--with-debug` + +```console +$ nginx -V 2>&1 | grep -- '--with-debug' +``` + +5. Get the list of processes running in the container + +```console +$ ps -ef +UID PID PPID C STIME TTY TIME CMD +root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres +root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa +root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ +nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process +nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process +root 172 0 0 20:43 pts/0 00:00:00 bash +``` + +6. Attach gdb to the nginx master process + +```console +$ gdb -p 21 +.... +Attaching to process 21 +Reading symbols from /usr/sbin/nginx...done. +.... +(gdb) +``` + +7. Copy and paste the following: + +```console +set $cd = ngx_cycle->config_dump +set $nelts = $cd.nelts +set $elts = (ngx_conf_dump_t*)($cd.elts) +while ($nelts-- > 0) +set $name = $elts[$nelts]->name.data +printf "Dumping %s to nginx_conf.txt\n", $name +append memory nginx_conf.txt \ + $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end +end +``` + +8. Quit GDB by pressing CTRL+D + +9. Open nginx_conf.txt + +```console +cat nginx_conf.txt +``` diff --git a/docs/user-guide/annotations.md b/docs/user-guide/annotations.md deleted file mode 100644 index a8b93102a1..0000000000 --- a/docs/user-guide/annotations.md +++ /dev/null @@ -1,381 +0,0 @@ -# Annotations - -The following annotations are supported: - -|Name | type | -|---------------------------|------| -|[nginx.ingress.kubernetes.io/add-base-url](#rewrite)|true or false| -|[nginx.ingress.kubernetes.io/app-root](#rewrite)|string| -|[nginx.ingress.kubernetes.io/affinity](#session-affinity)|cookie| -|[nginx.ingress.kubernetes.io/auth-realm](#authentication)|string| -|[nginx.ingress.kubernetes.io/auth-secret](#authentication)|string| -|[nginx.ingress.kubernetes.io/auth-type](#authentication)|basic or digest| -|[nginx.ingress.kubernetes.io/auth-tls-secret](#certificate-authentication)|string| -|[nginx.ingress.kubernetes.io/auth-tls-verify-depth](#certificate-authentication)|number| -|[nginx.ingress.kubernetes.io/auth-tls-verify-client](#certificate-authentication)|string| -|[nginx.ingress.kubernetes.io/auth-tls-error-page](#certificate-authentication)|string| -|[nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream](#certificate-authentication)|true or false| -|[nginx.ingress.kubernetes.io/auth-url](#external-authentication)|string| -|[nginx.ingress.kubernetes.io/base-url-scheme](#rewrite)|string| -|[nginx.ingress.kubernetes.io/client-body-buffer-size](#client-body-buffer-size)|string| -|[nginx.ingress.kubernetes.io/configuration-snippet](#configuration-snippet)|string| -|[nginx.ingress.kubernetes.io/default-backend](#default-backend)|string| -|[nginx.ingress.kubernetes.io/enable-cors](#enable-cors)|true or false| -|[nginx.ingress.kubernetes.io/cors-allow-origin](#enable-cors)|string| -|[nginx.ingress.kubernetes.io/cors-allow-methods](#enable-cors)|string|
-|[nginx.ingress.kubernetes.io/cors-allow-headers](#enable-cors)|string| -|[nginx.ingress.kubernetes.io/cors-allow-credentials](#enable-cors)|true or false| -|[nginx.ingress.kubernetes.io/force-ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false| -|[nginx.ingress.kubernetes.io/from-to-www-redirect](#redirect-from-to-www)|true or false| -|[nginx.ingress.kubernetes.io/limit-connections](#rate-limiting)|number| -|[nginx.ingress.kubernetes.io/limit-rps](#rate-limiting)|number| -|[nginx.ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string| -|[nginx.ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number| -|[nginx.ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number| -|[nginx.ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number| -|[nginx.ingress.kubernetes.io/proxy-next-upstream](#custom-timeouts)|string| -|[nginx.ingress.kubernetes.io/proxy-request-buffering](#custom-timeouts)|string| -|[nginx.ingress.kubernetes.io/proxy-redirect-from](#proxy-redirect)|string| -|[nginx.ingress.kubernetes.io/proxy-redirect-to](#proxy-redirect)|string| -|[nginx.ingress.kubernetes.io/rewrite-target](#rewrite)|URI| -|[nginx.ingress.kubernetes.io/secure-backends](#secure-backends)|true or false| -|[nginx.ingress.kubernetes.io/server-alias](#server-alias)|string| -|[nginx.ingress.kubernetes.io/server-snippet](#server-snippet)|string| -|[nginx.ingress.kubernetes.io/service-upstream](#service-upstream)|true or false| -|[nginx.ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string| -|[nginx.ingress.kubernetes.io/session-cookie-hash](#cookie-affinity)|string| -|[nginx.ingress.kubernetes.io/ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false| -|[nginx.ingress.kubernetes.io/ssl-passthrough](#ssl-passthrough)|true or false| -|[nginx.ingress.kubernetes.io/upstream-max-fails](#custom-nginx-upstream-checks)|number| -|[nginx.ingress.kubernetes.io/upstream-fail-timeout](#custom-nginx-upstream-checks)|number| -|[nginx.ingress.kubernetes.io/upstream-hash-by](#custom-nginx-upstream-hashing)|string| -|[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| - -### Rewrite - -In some scenarios the exposed URL in the backend service differs from the specified path in the Ingress rule. Without a rewrite any request will return 404. -Set the annotation `nginx.ingress.kubernetes.io/rewrite-target` to the path expected by the service. - -If the application contains relative links it is possible to add an additional annotation `nginx.ingress.kubernetes.io/add-base-url` that will prepend a [`base` tag](https://developer.mozilla.org/en/docs/Web/HTML/Element/base) in the header of the returned HTML from the backend. - -If the scheme of [`base` tag](https://developer.mozilla.org/en/docs/Web/HTML/Element/base) need to be specific, set the annotation `nginx.ingress.kubernetes.io/base-url-scheme` to the scheme such as `http` and `https`. - -If the Application Root is exposed in a different path and needs to be redirected, set the annotation `nginx.ingress.kubernetes.io/app-root` to redirect requests for `/`. - -Please check the [rewrite](../examples/rewrite/README.md) example. - -### Session Affinity - -The annotation `nginx.ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server. -The only affinity type available for NGINX is `cookie`. 
- -Please check the [affinity](../examples/affinity/cookie/README.md) example. - -### Authentication - -Is possible to add authentication adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords inside the key `auth`. - -The annotations are: -``` -nginx.ingress.kubernetes.io/auth-type: [basic|digest] -``` - -Indicates the [HTTP Authentication Type: Basic or Digest Access Authentication](https://tools.ietf.org/html/rfc2617). - -``` -nginx.ingress.kubernetes.io/auth-secret: secretName -``` - -The name of the secret that contains the usernames and passwords with access to the `path`s defined in the Ingress Rule. -The secret must be created in the same namespace as the Ingress rule. - -``` -nginx.ingress.kubernetes.io/auth-realm: "realm string" -``` - -Please check the [auth](../examples/auth/basic/README.md) example. - -### Custom NGINX upstream checks - -NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable the configuration of each server in the upstream. The Ingress controller allows custom `max_fails` and `fail_timeout` parameters in a global context using `upstream-max-fails` and `upstream-fail-timeout` in the NGINX ConfigMap or in a particular Ingress rule. `upstream-max-fails` defaults to 0. This means NGINX will respect the container's `readinessProbe` if it is defined. If there is no probe and no values for `upstream-max-fails` NGINX will continue to send traffic to the container. - -**With the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams.** - -To use custom values in an Ingress rule define these annotations: - -`nginx.ingress.kubernetes.io/upstream-max-fails`: number of unsuccessful attempts to communicate with the server that should occur in the duration set by the `upstream-fail-timeout` parameter to consider the server unavailable. - -`nginx.ingress.kubernetes.io/upstream-fail-timeout`: time in seconds during which the specified number of unsuccessful attempts to communicate with the server should occur to consider the server unavailable. This is also the period of time the server will be considered unavailable. - -In NGINX, backend server pools are called "[upstreams](http://nginx.org/en/docs/http/ngx_http_upstream_module.html)". Each upstream contains the endpoints for a service. An upstream is created for each service that has Ingress rules defined. - -**Important:** All Ingress rules using the same service will use the same upstream. Only one of the Ingress rules should define annotations to configure the upstream servers. - -Please check the [custom upstream check](../examples/customization/custom-upstream-check/README.md) example. - -### Custom NGINX upstream hashing - -NGINX supports load balancing by client-server mapping based on [consistent hashing](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) for a given key. The key can contain text, variables or any combination thereof. This feature allows for request stickiness other than client IP or cookies. The [ketama](http://www.last.fm/user/RJ/journal/2007/04/10/392555/) consistent hashing method will be used which ensures only a few keys would be remapped to different servers on upstream group changes. 
- -To enable consistent hashing for a backend: - -`nginx.ingress.kubernetes.io/upstream-hash-by`: the nginx variable, text value or any combination thereof to use for consistent hashing. For example `nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"` to consistently hash upstream requests by the current request URI. - -### Certificate Authentication - -It's possible to enable Certificate-Based Authentication (Mutual Authentication) using additional annotations in Ingress Rule. - -The annotations are: -``` -nginx.ingress.kubernetes.io/auth-tls-secret: secretName -``` - -The name of the secret that contains the full Certificate Authority chain `ca.crt` that is enabled to authenticate against this ingress. It's composed of namespace/secretName. - -``` -nginx.ingress.kubernetes.io/auth-tls-verify-depth -``` - -The validation depth between the provided client certificate and the Certification Authority chain. - -``` -nginx.ingress.kubernetes.io/auth-tls-verify-client -``` - -Enables verification of client certificates. - -``` -nginx.ingress.kubernetes.io/auth-tls-error-page -``` - -The URL/Page that user should be redirected in case of a Certificate Authentication Error - -``` -nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream -``` - -Indicates if the received certificates should be passed or not to the upstream server. -By default this is disabled. - -Please check the [tls-auth](../examples/auth/client-certs/README.md) example. - -**Important:** - -TLS with Client Authentication is NOT possible in Cloudflare as is not allowed it and might result in unexpected behavior. - -Cloudflare only allows Authenticated Origin Pulls and is required to use their own certificate: -https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/ - -Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: -https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls - - -### Configuration snippet - -Using this annotation you can add additional configuration to the NGINX location. For example: - -```yaml -nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "Request-Id: $request_id"; -``` - -### Default Backend - -The ingress controller requires a default backend. This service is handle the response when the service in the Ingress rule does not have endpoints. -This is a global configuration for the ingress controller. In some cases could be required to return a custom content or format. In this scenario we can use the annotation `nginx.ingress.kubernetes.io/default-backend: ` to specify a custom default backend. - -### Enable CORS - -To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule add the annotation `nginx.ingress.kubernetes.io/enable-cors: "true"`. This will add a section in the server location enabling this functionality. - -CORS can be controlled with the following annotations: - -* `nginx.ingress.kubernetes.io/cors-allow-methods` controls which methods are accepted. This is a multi-valued field, separated by ',' and accepts only letters (upper and lower case). - -Example: `nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS"` - -* `nginx.ingress.kubernetes.io/cors-allow-headers` controls which headers are accepted. This is a multi-valued field, separated by ',' and accepts letters, numbers, _ and -. 
- -Example: `nginx.ingress.kubernetes.io/cors-allow-headers: "X-Forwarded-For, X-app123-XPTO"` - -* `nginx.ingress.kubernetes.io/cors-allow-origin` controls what's the accepted Origin for CORS and defaults to '*'. This is a single field value, with the following format: http(s)://origin-site.com or http(s)://origin-site.com:port - -Example: `nginx.ingress.kubernetes.io/cors-allow-origin: "https://origin-site.com:4443"` - -* `nginx.ingress.kubernetes.io/cors-allow-credentials` controls if credentials can be passed during CORS operations. - -Example: `nginx.ingress.kubernetes.io/cors-allow-credentials: "true"` - - -For more information please check https://enable-cors.org/server_nginx.html - -### Server Alias - -To add Server Aliases to an Ingress rule add the annotation `nginx.ingress.kubernetes.io/server-alias: ""`. -This will create a server with the same configuration, but a different server_name as the provided host. - -*Note:* A server-alias name cannot conflict with the hostname of an existing server. If it does the server-alias -annotation will be ignored. If a server-alias is created and later a new server with the same hostname is created -the new server configuration will take place over the alias configuration. - -For more information please see http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name - -### Server snippet - -Using the annotation `nginx.ingress.kubernetes.io/server-snippet` it is possible to add custom configuration in the server configuration block. - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - annotations: - nginx.ingress.kubernetes.io/server-snippet: | -set $agentflag 0; - -if ($http_user_agent ~* "(Mobile)" ){ - set $agentflag 1; -} - -if ( $agentflag = 1 ) { - return 301 https://m.example.com; -} -``` - -**Important:** This annotation can be used only once per host - -### Client Body Buffer Size - -Sets buffer size for reading client request body per location. In case the request body is larger than the buffer, -the whole body or only its part is written to a temporary file. By default, buffer size is equal to two memory pages. -This is 8K on x86, other 32-bit platforms, and x86-64. It is usually 16K on other 64-bit platforms. This annotation is -applied to each location provided in the ingress rule. - -*Note:* The annotation value must be given in a valid format otherwise the -For example to set the client-body-buffer-size the following can be done: - -* `nginx.ingress.kubernetes.io/client-body-buffer-size: "1000"` # 1000 bytes -* `nginx.ingress.kubernetes.io/client-body-buffer-size: 1k` # 1 kilobyte -* `nginx.ingress.kubernetes.io/client-body-buffer-size: 1K` # 1 kilobyte -* `nginx.ingress.kubernetes.io/client-body-buffer-size: 1m` # 1 megabyte -* `nginx.ingress.kubernetes.io/client-body-buffer-size: 1M` # 1 megabyte - -For more information please see http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size - -### External Authentication - -To use an existing service that provides authentication the Ingress rule can be annotated with `nginx.ingress.kubernetes.io/auth-url` to indicate the URL where the HTTP request should be sent. -Additionally it is possible to set `nginx.ingress.kubernetes.io/auth-method` to specify the HTTP method to use (GET or POST). - -```yaml -nginx.ingress.kubernetes.io/auth-url: "URL to the authentication service" -``` - -Please check the [external-auth](../examples/auth/external-auth/README.md) example. 
- -### Rate limiting - -The annotations `nginx.ingress.kubernetes.io/limit-connections`, `nginx.ingress.kubernetes.io/limit-rps`, and `nginx.ingress.kubernetes.io/limit-rpm` define a limit on the connections that can be opened by a single client IP address. This can be used to mitigate [DDoS Attacks](https://www.nginx.com/blog/mitigating-ddos-attacks-with-nginx-and-nginx-plus). - -`nginx.ingress.kubernetes.io/limit-connections`: number of concurrent connections allowed from a single IP address. - -`nginx.ingress.kubernetes.io/limit-rps`: number of connections that may be accepted from a given IP each second. - -`nginx.ingress.kubernetes.io/limit-rpm`: number of connections that may be accepted from a given IP each minute. - -You can specify the client IP source ranges to be excluded from rate-limiting through the `nginx.ingress.kubernetes.io/limit-whitelist` annotation. The value is a comma separated list of CIDRs. - -If you specify multiple annotations in a single Ingress rule, `limit-rpm`, and then `limit-rps` takes precedence. - -The annotation `nginx.ingress.kubernetes.io/limit-rate`, `nginx.ingress.kubernetes.io/limit-rate-after` define a limit the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit. - -`nginx.ingress.kubernetes.io/limit-rate-after`: sets the initial amount after which the further transmission of a response to a client will be rate limited. - -`nginx.ingress.kubernetes.io/limit-rate`: rate of request that accepted from a client each second. - -To configure this setting globally for all Ingress rules, the `limit-rate-after` and `limit-rate` value may be set in the NGINX ConfigMap. if you set the value in ingress annotation will cover global setting. - -### SSL Passthrough - -The annotation `nginx.ingress.kubernetes.io/ssl-passthrough` allows to configure TLS termination in the pod and not in NGINX. - -**Important:** - -- Using the annotation `nginx.ingress.kubernetes.io/ssl-passthrough` invalidates all the other available annotations. This is because SSL Passthrough works in L4 (TCP). -- The use of this annotation requires the flag `--enable-ssl-passthrough` (By default it is disabled) - -### Secure backends - -By default NGINX uses `http` to reach the services. Adding the annotation `nginx.ingress.kubernetes.io/secure-backends: "true"` in the Ingress rule changes the protocol to `https`. - -### Service Upstream - -By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. This annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue [#257](https://github.com/kubernetes/ingress-nginx/issues/257). - -#### Known Issues - -If the `service-upstream` annotation is specified the following things should be taken into consideration: - -* Sticky Sessions will not work as only round-robin load balancing is supported. -* The `proxy_next_upstream` directive will not have any effect meaning on error the request will not be dispatched to another upstream. 
- -### Server-side HTTPS enforcement through redirect - -By default the controller redirects (301) to `HTTPS` if TLS is enabled for that ingress. If you want to disable that behavior globally, you can use `ssl-redirect: "false"` in the NGINX config map. - -To configure this feature for specific ingress resources, you can use the `nginx.ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource. - -When using SSL offloading outside of cluster (e.g. AWS ELB) it may be useful to enforce a redirect to `HTTPS` even when there is not TLS cert available. This can be achieved by using the `nginx.ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource. - -### Redirect from to www - -In some scenarios is required to redirect from `www.domain.com` to `domain.com` or viceversa. -To enable this feature use the annotation `nginx.ingress.kubernetes.io/from-to-www-redirect: "true"` - -**Important:** -If at some point a new Ingress is created with a host equal to one of the options (like `domain.com`) the annotation will be omitted. - -### Whitelist source range - -You can specify the allowed client IP source ranges through the `nginx.ingress.kubernetes.io/whitelist-source-range` annotation. The value is a comma separated list of [CIDRs](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing), e.g. `10.0.0.0/24,172.10.0.1`. - -To configure this setting globally for all Ingress rules, the `whitelist-source-range` value may be set in the NGINX ConfigMap. - -*Note:* Adding an annotation to an Ingress rule overrides any global restriction. - -### Cookie affinity - -If you use the ``cookie`` type you can also specify the name of the cookie that will be used to route the requests with the annotation `nginx.ingress.kubernetes.io/session-cookie-name`. The default is to create a cookie named 'route'. - -In case of NGINX the annotation `nginx.ingress.kubernetes.io/session-cookie-hash` defines which algorithm will be used to 'hash' the used upstream. Default value is `md5` and possible values are `md5`, `sha1` and `index`. -The `index` option is not hashed, an in-memory index is used instead, it's quicker and the overhead is shorter Warning: the matching against upstream servers list is inconsistent. So, at reload, if upstreams servers has changed, index values are not guaranteed to correspond to the same server as before! USE IT WITH CAUTION and only if you need to! - -In NGINX this feature is implemented by the third party module [nginx-sticky-module-ng](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng). The workflow used to define which upstream server will be used is explained [here](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/raw/08a395c66e425540982c00482f55034e1fee67b6/docs/sticky.pdf) - -### Custom timeouts - -Using the configuration configmap it is possible to set the default global timeout for connections to the upstream servers. -In some scenarios is required to have different values. 
To allow this we provide annotations that allows this customization: - -- `nginx.ingress.kubernetes.io/proxy-connect-timeout` -- `nginx.ingress.kubernetes.io/proxy-send-timeout` -- `nginx.ingress.kubernetes.io/proxy-read-timeout` -- `nginx.ingress.kubernetes.io/proxy-next-upstream` -- `nginx.ingress.kubernetes.io/proxy-request-buffering` - -### Proxy redirect - -With the annotations `nginx.ingress.kubernetes.io/proxy-redirect-from` and `nginx.ingress.kubernetes.io/proxy-redirect-to` it is possible to set the text that should be changed in the `Location` and `Refresh` header fields of a proxied server response (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect) -Setting "off" or "default" in the annotation `nginx.ingress.kubernetes.io/proxy-redirect-to` disables `nginx.ingress.kubernetes.io/proxy-redirect-to` -Both annotations will be used in any other case -By default the value is "off". - -### Custom max body size - -For NGINX, 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body. This size can be configured by the parameter [`client_max_body_size`](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size). - -To configure this setting globally for all Ingress rules, the `proxy-body-size` value may be set in the NGINX ConfigMap. -To use custom values in an Ingress rule define these annotation: - -```yaml -nginx.ingress.kubernetes.io/proxy-body-size: 8m -``` diff --git a/docs/user-guide/basic-usage.md b/docs/user-guide/basic-usage.md new file mode 100644 index 0000000000..0f3d9e8edb --- /dev/null +++ b/docs/user-guide/basic-usage.md @@ -0,0 +1,50 @@ +# Basic usage - host based routing + +ingress-nginx can be used for many use cases, on various cloud providers, and supports many configurations. In this section you can find a common usage scenario where a single load balancer powered by ingress-nginx will route traffic to 2 different HTTP backend services based on the host name. + +First of all follow the instructions to install ingress-nginx. Then imagine that you need to expose 2 HTTP services already installed: `myServiceA`, `myServiceB`. Let's say that you want to expose the first at `myServiceA.foo.org` and the second at `myServiceB.foo.org`. One possible solution is to create two **ingress** resources: + +``` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-myServiceA + annotations: + # use the shared ingress-nginx + kubernetes.io/ingress.class: "nginx" +spec: + rules: + - host: myServiceA.foo.org + http: + paths: + - path: / + backend: + serviceName: myServiceA + servicePort: 80 +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-myServiceB + annotations: + # use the shared ingress-nginx + kubernetes.io/ingress.class: "nginx" +spec: + rules: + - host: myServiceB.foo.org + http: + paths: + - path: / + backend: + serviceName: myServiceB + servicePort: 80 +``` + +When you apply this yaml, 2 ingress resources will be created and managed by the **ingress-nginx** instance. Nginx is configured to automatically discover all Ingresses with the `kubernetes.io/ingress.class: "nginx"` annotation. +Please note that the ingress resource should be placed in the same namespace as the backend resource. + +On many cloud providers ingress-nginx will also create the corresponding Load Balancer resource.
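+Once that external IP is known, you can smoke-test the routing even before any DNS records exist by overriding name resolution in curl; the IP below is a placeholder for your load balancer's external IP: + +```console +# send a request for myServiceA.foo.org straight to the load balancer +$ curl --resolve myServiceA.foo.org:80:203.0.113.10 http://myServiceA.foo.org/ +```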
All you have to do is get the external IP and add a DNS `A record` inside your DNS provider that point myServiceA.foo.org and myServiceB.foo.org to the nginx external IP. Get the external IP by running: + +``` +kubectl get services -n ingress-nginx +``` diff --git a/docs/user-guide/cli-arguments.md b/docs/user-guide/cli-arguments.md index b4ec882a6f..b8581a2761 100644 --- a/docs/user-guide/cli-arguments.md +++ b/docs/user-guide/cli-arguments.md @@ -1,61 +1,50 @@ # Command line arguments -```console -Usage of : - --alsologtostderr log to standard error as well as files - --annotations-prefix string Prefix of the ingress annotations. (default "nginx.ingress.kubernetes.io") - --apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. - --configmap string Name of the ConfigMap that contains the custom configuration to use - --default-backend-service string Service used to serve a 404 page for the default backend. Takes the form - namespace/name. The controller uses the first node port of this Service for - the default backend. - --default-server-port int Default port to use for exposing the default server (catch all) (default 8181) - --default-ssl-certificate string Name of the secret - that contains a SSL certificate to be used as default for a HTTPS catch-all server. - Takes the form /. - --disable-node-list Disable querying nodes. If --force-namespace-isolation is true, this should also be set. (DEPRECATED) - --election-id string Election id to use for status update. (default "ingress-controller-leader") - --enable-ssl-chain-completion Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates. - If the certificate contain issues chain issues is not possible to enable OCSP. - Default is true. (default true) - --enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled - --force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or - configmaps located in a different namespace than the specified in the flag --watch-namespace. - --health-check-path string Defines - the URL to be used as health check inside in the default server in NGINX. (default "/healthz") - --healthz-port int port for healthz endpoint. (default 10254) - --http-port int Indicates the port to use for HTTP traffic (default 80) - --https-port int Indicates the port to use for HTTPS traffic (default 443) - --ingress-class string Name of the ingress class to route through this controller. - --kubeconfig string Path to kubeconfig file with authorization and master location information. - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files (default true) - --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) - --publish-service string Service fronting the ingress controllers. Takes the form namespace/name. - The controller will set the endpoint records on the ingress objects to reflect those on the service. 
- --report-node-internal-ip-address Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address
- --sort-backends Defines if backends and it's endpoints should be sorted
- --ssl-passtrough-proxy-port int Default port to use internally for SSL when SSL Passthgough is enabled (default 442)
- --status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 2)
- --sync-period duration Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s)
- --tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose.
- The key in the map indicates the external port to be used. The value is the name of the
- service with the format namespace/serviceName and the port of the service could be a
- number of the name of the port.
- The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend
- --udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose.
- The key in the map indicates the external port to be used. The value is the name of the
- service with the format namespace/serviceName and the port of the service could be a
- number of the name of the port.
- --update-status Indicates if the
- ingress controller should update the Ingress status IP/hostname. Default is true (default true)
- --update-status-on-shutdown Indicates if the
- ingress controller should update the Ingress status IP/hostname when the controller
- is being stopped. Default is true (default true)
- -v, --v Level log level for V logs
- --version Shows release information about the NGINX Ingress controller
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces
-```
+The following command line arguments are accepted by the Ingress controller executable.
+
+They are set in the container spec of the `nginx-ingress-controller` Deployment manifest.
+
+| Argument | Description |
+|----------|-------------|
+| `--alsologtostderr` | log to standard error as well as files |
+| `--annotations-prefix string` | Prefix of the Ingress annotations specific to the NGINX controller. (default "nginx.ingress.kubernetes.io") |
+| `--apiserver-host string` | Address of the Kubernetes API server. Takes the form "protocol://address:port". If not specified, it is assumed the program runs inside a Kubernetes cluster and local discovery is attempted. |
+| `--configmap string` | Name of the ConfigMap containing custom global configurations for the controller. |
+| `--default-backend-service string` | Service used to serve HTTP requests not matching any known server name (catch-all). Takes the form "namespace/name". The controller configures NGINX to forward requests to the first port of this Service. If not specified, a 404 page will be returned directly from NGINX. |
+| `--default-server-port int` | When `default-backend-service` is not specified or the specified service does not have any endpoint, a local endpoint with this port will be used to serve a 404 page from inside NGINX. |
+| `--default-ssl-certificate string` | Secret containing an SSL certificate to be used by the default HTTPS server (catch-all). Takes the form "namespace/name". |
+| `--disable-catch-all` | Disable support for catch-all Ingresses. |
+| `--election-id string` | Election id to use for Ingress status updates. (default "ingress-controller-leader") |
+| `--enable-dynamic-certificates` | Dynamically serves certificates instead of reloading NGINX when certificates are created, updated, or deleted. Currently does not support OCSP stapling, so --enable-ssl-chain-completion must be turned off (default behaviour). Assuming the certificate is generated with a 2048 bit RSA key/cert pair, this feature can store roughly 5000 certificates. (enabled by default) |
+| `--enable-ssl-chain-completion` | Autocomplete SSL certificate chains with missing intermediate CA certificates. A valid certificate chain is required to enable OCSP stapling. Certificates uploaded to Kubernetes must have the "Authority Information Access" X.509 v3 extension for this to succeed. (default true) |
+| `--enable-ssl-passthrough` | Enable SSL Passthrough. |
+| `--health-check-path string` | URL path of the health check endpoint. Configured inside the NGINX status server. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. (default "/healthz") |
+| `--health-check-timeout duration` | Time limit, in seconds, for a probe to the health-check-path to succeed. (default 10) |
+| `--healthz-port int` | Port to use for the healthz endpoint. (default 10254) |
+| `--http-port int` | Port to use for servicing HTTP traffic. (default 80) |
+| `--https-port int` | Port to use for servicing HTTPS traffic. (default 443) |
+| `--ingress-class string` | Name of the ingress class this controller satisfies. The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class". All ingress classes are satisfied if this parameter is left empty. |
+| `--kubeconfig string` | Path to a kubeconfig file containing authorization and API server information. |
+| `--log_backtrace_at traceLocation` | when logging hits line file:N, emit a stack trace (default :0) |
+| `--log_dir string` | If non-empty, write log files in this directory |
+| `--logtostderr` | log to standard error instead of files (default true) |
+| `--profiling` | Enable profiling via web interface host:port/debug/pprof/ (default true) |
+| `--publish-service string` | Service fronting the Ingress controller. Takes the form "namespace/name". When used together with update-status, the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies. |
+| `--publish-status-address string` | Customized address to set as the load-balancer status of Ingress objects this controller satisfies. Requires the update-status parameter. |
+| `--report-node-internal-ip-address` | Set the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter. |
+| `--ssl-passthrough-proxy-port int` | Port to use internally for SSL Passthrough. (default 442) |
+| `--stderrthreshold severity` | logs at or above this threshold go to stderr (default 2) |
+| `--sync-period duration` | Period at which the controller forces the repopulation of its local object stores. Disabled by default. |
+| `--sync-rate-limit float32` | Define the sync frequency upper limit (default 0.3) |
+| `--tcp-services-configmap string` | Name of the ConfigMap containing the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port number or name. TCP ports 80 and 443 are reserved by the controller for servicing HTTP traffic. |
+| `--udp-services-configmap string` | Name of the ConfigMap containing the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port name or number. |
+| `--update-status` | Update the load-balancer status of Ingress objects this controller satisfies. Requires setting the publish-service parameter to a valid Service reference. (default true) |
+| `--update-status-on-shutdown` | Update the load-balancer status of Ingress objects when the controller shuts down. Requires the update-status parameter. (default true) |
+| `-v`, `--v Level` | log level for V logs |
+| `--version` | Show release information about the NGINX Ingress controller and exit. |
+| `--vmodule moduleSpec` | comma-separated list of pattern=N settings for file-filtered logging |
+| `--watch-namespace string` | Namespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. All namespaces are watched if this parameter is left empty. |
+| `--validating-webhook` | The address to start an admission controller on. |
+| `--validating-webhook-certificate` | The certificate the webhook uses for its TLS handling. |
+| `--validating-webhook-key` | The key the webhook uses for its TLS handling. |
diff --git a/docs/user-guide/configmap.md b/docs/user-guide/configmap.md
deleted file mode 100644
index 019bfe12f7..0000000000
--- a/docs/user-guide/configmap.md
+++ /dev/null
@@ -1,656 +0,0 @@
-# NGINX Ingress controller configuration ConfigMap
-
-ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable.
-The ConfigMap API resource stores configuration data as key-value pairs. The data provides the configurations for system
-components for the nginx-controller. Before you can begin using a config-map it must be [deployed](../../deploy/README.md/#deploying-the-config-map).
-
-In order to overwrite nginx-controller configuration values as seen in [config.go](https://github.com/kubernetes/ingress-nginx/blob/master/internal/ingress/controller/config/config.go),
-you can add key-value pairs to the data section of the config-map.
For Example: - -```yaml -data: - map-hash-bucket-size: "128" - ssl-protocols: SSLv2 -``` - -## Configuration options - -The following table shows a configuration option's name, type, and the default value: - -|name|type|default| -|:---|:---|:------| -|[add-headers](#add-headers)|string|""| -|[allow-backend-server-header](#allow-backend-server-header)|bool|false| -|[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"| -|[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"| -|[enable-dynamic-tls-records](#enable-dynamic-tls-records)|bool|true| -|[enable-modsecurity](#enable-modsecurity)|bool|false| -|[enable-owasp-modsecurity-crs](#enable-owasp-modsecurity-crs)|bool|false| -|[client-header-buffer-size](#client-header-buffer-size)|string|"1k"| -|[client-header-timeout](#client-header-timeout)|int|60| -|[client-body-buffer-size](#client-body-buffer-size)|string|"8k"| -|[client-body-timeout](#client-body-timeout)|int|60| -|[disable-access-log](#disable-access-log)|bool|false| -|[disable-ipv6](#disable-ipv6)|bool|false| -|[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false| -|[ignore-invalid-headers](#ignore-invalid-headers)|bool|true| -|[enable-vts-status](#enable-vts-status)|bool|false| -|[vts-status-zone-size](#vts-status-zone-size)|string|"10m"| -|[vts-default-filter-key](#vts-default-filter-key)|string|"$geoip_country_code country::*"| -|[retry-non-idempotent](#retry-non-idempotent)|bool|false| -|[error-log-level](#error-log-level)|string|"notice"| -|[http2-max-field-size](#http2-max-field-size)|string|"4k"| -|[http2-max-header-size](#http2-max-header-size)|string|"16k"| -|[hsts](#hsts)|bool|true| -|[hsts-include-subdomains](#hsts-include-subdomains)|bool|true| -|[hsts-max-age](#hsts-max-age)|string|"15724800"| -|[hsts-preload](#hsts-preload)|bool|false| -|[keep-alive](#keep-alive)|int|75| -|[keep-alive-requests](#keep-alive-requests)|int|100| -|[large-client-header-buffers](#large-client-header-buffers)|string|"4 8k"| -|[log-format-escape-json](#log-format-escape-json)|bool|false| -|[log-format-upstream](#log-format-upstream)|string|`%v - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status`| -|[log-format-stream](#log-format-stream)|string|`[$time_local] $protocol $status $bytes_sent $bytes_received $session_time`| -|[max-worker-connections](#max-worker-connections)|int|16384| -|[map-hash-bucket-size](#max-worker-connections)|int|64| -|[proxy-real-ip-cidr](#proxy-real-ip-cidr)|[]string|"0.0.0.0/0"| -|[proxy-set-headers](#proxy-set-headers)|string|""| -|[server-name-hash-max-size](#server-name-hash-max-size)|int|1024| -|[server-name-hash-bucket-size](#server-name-hash-bucket-size)|int|`` -|[proxy-headers-hash-max-size](#proxy-headers-hash-max-size)|int|512| -|[proxy-headers-hash-bucket-size](#proxy-headers-hash-bucket-size)|int|64| -|[server-tokens](#server-tokens)|bool|true| -|[ssl-ciphers](#ssl-ciphers)|string|"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"| -|[ssl-ecdh-curve](#ssl-ecdh-curve)|string|"auto"| -|[ssl-dh-param](#ssl-dh-param)|string|""| -|[ssl-protocols](#ssl-protocols)|string|"TLSv1.2"| 
-|[ssl-session-cache](#ssl-session-cache)|bool|true| -|[ssl-session-cache-size](#ssl-session-cache-size)|string|"10m"| -|[ssl-session-tickets](#ssl-session-tickets)|bool|true| -|[ssl-session-ticket-key](#ssl-session-ticket-key)|string|`` -|[ssl-session-timeout](#ssl-session-timeout)|string|"10m"| -|[ssl-buffer-size](#ssl-buffer-size)|string|"4k"| -|[use-proxy-protocol](#use-proxy-protocol)|bool|false| -|[use-gzip](#use-gzip)|bool|true| -|[enable-brotli](#enable-brotli)|bool|true| -|[brotli-level](#brotli-level)|int|4| -|[brotli-types](#brotli-types)|string|"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component"| -|[use-http2](#use-http2)|bool|true| -|[gzip-types](#gzip-types)|string|"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component"| -|[worker-processes](#worker-processes)|string|``| -|[worker-shutdown-timeout](#worker-shutdown-timeout)|string|"10s"| -|[load-balance](#load-balance)|string|"least_conn"| -|[variables-hash-bucket-size](#variables-hash-bucket-size)|int|128| -|[variables-hash-max-size](#variables-hash-max-size)|int|2048| -|[upstream-keepalive-connections](#upstream-keepalive-connections)|int|32| -|[limit-conn-zone-variable](#limit-conn-zone-variable)|string|"$binary_remote_addr"| -|[proxy-stream-timeout](#proxy-stream-timeout)|string|"600s"| -|[proxy-stream-responses](#proxy-stream-responses)|int|1| -|[bind-address-ipv4](#bind-address-ipv4)|[]string|""| -|[bind-address-ipv6](#bind-address-ipv6)|[]string|""| -|[forwarded-for-header](#forwarded-for-header)|string|"X-Forwarded-For"| -|[compute-full-forwarded-for](#compute-full-forwarded-for)|bool|false| -|[enable-opentracing](#enable-opentracing)|bool|false| -|[zipkin-collector-host](#zipkin-collector-host)|string|""| -|[zipkin-collector-port](#zipkin-collector-port)|int|9411| -|[zipkin-service-name](#zipkin-service-name)|string|"nginx"| -|[http-snippet](#http-snippet)|string|""| -|[server-snippet](#server-snippet)|string|""| -|[location-snippet](#location-snippet)|string|""| -|[custom-http-errors](#custom-http-errors)|[]int]|[]int{}| -|[proxy-body-size](#proxy-body-size)|string|"1m"| -|[proxy-connect-timeout](#proxy-connect-timeout)|int|5| -|[proxy-read-timeout](#proxy-read-timeout)|int|60| -|[proxy-send-timeout](#proxy-send-timeout)|int|60| -|[proxy-buffer-size](#proxy-buffer-size)|string|"4k"| -|[proxy-cookie-path](#proxy-cookie-path)|string|"off"| -|[proxy-cookie-domain](#proxy-cookie-domain)|string|"off"| -|[proxy-next-upstream](#proxy-next-upstream)|string|"error timeout invalid_header http_502 http_503 http_504"| -|[proxy-redirect-from](#proxy-redirect-from)|string|"off"| -|[proxy-request-buffering](#proxy-request-buffering)|string|"on"| -|[ssl-redirect](#ssl-redirect)|bool|true| -|[whitelist-source-range](#whitelist-source-range)|[]string|[]string{}| -|[skip-access-log-urls](#skip-access-log-urls)|[]string|[]string{}| -|[limit-rate](#limit-rate)|int|0| -|[limit-rate-after](#limit-rate-after)|int|0| -|[http-redirect-code](#http-redirect-code)|int|308| - -## add-headers - -Sets custom headers 
from a configmap before sending traffic to the client. See [proxy-set-headers](#proxy-set-headers). [example](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers) - -## allow-backend-server-header - -AllowBackendServerHeader enables the return of the header Server from the backend instead of the generic nginx string. By default this is disabled. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header - -## access-log-path - -Access log path. Goes to '/var/log/nginx/access.log' by default. - -## error-log-path - -Error log path. Goes to '/var/log/nginx/error.log' by default. - -_References:_ -- http://nginx.org/en/docs/ngx_core_module.html#error_log - -## enable-dynamic-tls-records - -Enables dynamically sized TLS records to improve time-to-first-byte. By default this is enabled. See [CloudFlare's blog](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency) for more information. - -## enable-modsecurity - -Enables the modsecurity module for NGINX. By default this is disabled. - -## enable-owasp-modsecurity-crs - -Enables the OWASP ModSecurity Core Rule Set (CRS). By default this is disabled. - -## client-header-buffer-size - -Allows to configure a custom buffer size for reading client request header. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size - -## client-header-timeout - -Defines a timeout for reading client request header, in seconds. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout - -## client-body-buffer-size - -Sets buffer size for reading client request body. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size - -## client-body-timeout - -Defines a timeout for reading client request body, in seconds. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout - -## disable-access-log - -Disables the Access Log from the entire Ingress Controller. This is 'false' by default. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log - -## disable-ipv6 - -Disable listening on IPV6. By default this is disabled. - -## enable-underscores-in-headers - -Enables underscores in header names. By default this is disabled. - -## ignore-invalid-headers - -Set if header fields with invalid names should be ignored. -By default this is enabled. - -## enable-vts-status - -Allows the replacement of the default status page with a third party module named [nginx-module-vts](https://github.com/vozlt/nginx-module-vts). -By default this is disabled. - -## vts-status-zone-size - -Vts config on http level sets parameters for a shared memory zone that will keep states for various keys. The cache is shared between all worker processes. Default value is 10m - -_References:_ -- https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone - -## vts-default-filter-key - -Vts config on http level enables the keys by user defined variable. The key is a key string to calculate traffic. The name is a group string to calculate traffic. The key and name can contain variables such as $host, $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones if not specified second argument name. 
Default value is $geoip_country_code country::* - -_References:_ -- https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key - -## retry-non-idempotent - -Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true". - -## error-log-level - -Configures the logging level of errors. Log levels above are listed in the order of increasing severity. - -_References:_ -- http://nginx.org/en/docs/ngx_core_module.html#error_log - -## http2-max-field-size - -Limits the maximum size of an HPACK-compressed request header field. - -_References:_ -- https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size - -## http2-max-header-size - -Limits the maximum size of the entire request header list after HPACK decompression. - -_References:_ -- https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size - -## hsts - -Enables or disables the header HSTS in servers running SSL. -HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. It provides protection against protocol downgrade attacks and cookie theft. - -_References:_ -- https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security -- https://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server - -## hsts-include-subdomains - -Enables or disables the use of HSTS in all the subdomains of the server-name. - -## hsts-max-age - -Sets the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. - -## hsts-preload - -Enables or disables the preload attribute in the HSTS feature (when it is enabled) dd - -## keep-alive - -Sets the time during which a keep-alive client connection will stay open on the server side. The zero value disables keep-alive client connections. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout - -## keep-alive-requests - -Sets the maximum number of requests that can be served through one keep-alive connection. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests - -## large-client-header-buffers - -Sets the maximum number and size of buffers used for reading large client request header. Default: 4 8k. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers - -## log-format-escape-json - -Sets if the escape parameter allows JSON (true) or default characters escaping in variables (false) Sets the nginx [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format). - -## log-format-upstream - -Sets the nginx [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format). 
-Example for json output: - -```console -log-format-upstream: '{ "time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", - "x-forward-for": "$proxy_add_x_forwarded_for", "request_id": "$request_id", "remote_user": - "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status": - $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", - "request_query": "$args", "request_length": $request_length, "duration": $request_time, - "method": "$request_method", "http_referrer": "$http_referer", "http_user_agent": - "$http_user_agent" }' - ``` - -Please check [log-format](log-format.md) for definition of each field. - -## log-format-stream - -Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format). - -## max-worker-connections - -Sets the maximum number of simultaneous connections that can be opened by each [worker process](http://nginx.org/en/docs/ngx_core_module.html#worker_connections) - -## map-hash-bucket-size - -Sets the bucket size for the [map variables hash tables](http://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size). The details of setting up hash tables are provided in a separate [document](http://nginx.org/en/docs/hash.html). - -## proxy-real-ip-cidr - -If use-proxy-protocol is enabled, proxy-real-ip-cidr defines the default the IP/network address of your external load balancer. - -## proxy-set-headers - -Sets custom headers from a configmap before sending traffic to backends. See [example](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers) - -## server-name-hash-max-size - -Sets the maximum size of the [server names hash tables](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_max_size) used in server names,map directive’s values, MIME types, names of request header strings, etc. - -_References:_ -- http://nginx.org/en/docs/hash.html - -## server-name-hash-bucket-size - -Sets the size of the bucket for the server names hash tables. - -_References:_ -- http://nginx.org/en/docs/hash.html -- http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size - -## proxy-headers-hash-max-size - -Sets the maximum size of the proxy headers hash tables. - -_References:_ -- http://nginx.org/en/docs/hash.html -- https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size - -## proxy-headers-hash-bucket-size - -Sets the size of the bucket for the proxy headers hash tables. - -_References:_ -- http://nginx.org/en/docs/hash.html -- https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size - -## server-tokens - -Send NGINX Server header in responses and display NGINX version in error pages. By default this is enabled. - -## ssl-ciphers - -Sets the [ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers) list to enable. The ciphers are specified in the format understood by the OpenSSL library. - -The default cipher list is: - `ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256`. - -The ordering of a ciphersuite is very important because it decides which algorithms are going to be selected in priority. 
The recommendation above prioritizes algorithms that provide perfect [forward secrecy](https://wiki.mozilla.org/Security/Server_Side_TLS#Forward_Secrecy). - -Please check the [Mozilla SSL Configuration Generator](https://mozilla.github.io/server-side-tls/ssl-config-generator/). - -## ssl-ecdh-curve - -Specifies a curve for ECDHE ciphers. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve - -## ssl-dh-param - -Sets the name of the secret that contains Diffie-Hellman key to help with "Perfect Forward Secrecy". - -_References:_ -- https://www.openssl.org/docs/manmaster/apps/dhparam.html -- https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam -- http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam - -## ssl-protocols - -Sets the [SSL protocols](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) to use. The default is: `TLSv1.2`. - -Please check the result of the configuration using `https://ssllabs.com/ssltest/analyze.html` or `https://testssl.sh`. - -## ssl-session-cache - -Enables or disables the use of shared [SSL cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) among worker processes. - -## ssl-session-cache-size - -Sets the size of the [SSL shared session cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) between all worker processes. - -## ssl-session-tickets - -Enables or disables session resumption through [TLS session tickets](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets). - -## ssl-session-ticket-key - -Sets the secret key used to encrypt and decrypt TLS session tickets. The value must be a valid base64 string. - -[TLS session ticket-key](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets), by default, a randomly generated key is used. To create a ticket: `openssl rand 80 | base64 -w0` - -## ssl-session-timeout - -Sets the time during which a client may [reuse the session](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout) parameters stored in a cache. - -## ssl-buffer-size - -Sets the size of the [SSL buffer](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) used for sending data. The default of 4k helps NGINX to improve TLS Time To First Byte (TTTFB). - -_References:_ -- https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/ - -## use-proxy-protocol - -Enables or disables the [PROXY protocol](https://www.nginx.com/resources/admin-guide/proxy-protocol/) to receive client connection (real IP address) information passed through proxy servers and load balancers such as HAProxy and Amazon Elastic Load Balancer (ELB). - -## use-gzip - -Enables or disables compression of HTTP responses using the ["gzip" module](http://nginx.org/en/docs/http/ngx_http_gzip_module.html). -The default mime type list to compress is: `application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`. - -## enable-brotli - -Enables or disables compression of HTTP responses using the ["brotli" module](https://github.com/google/ngx_brotli). 
-The default mime type list to compress is: `application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`. This is *enabled* by default. - -## brotli-level - -Sets the Brotli Compression Level that will be used. *Defaults to* 4. - -## brotli-types - -Sets the MIME Types that will be compressed on-the-fly by brotli. -*Defaults to* `application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`. - -## use-http2 - -Enables or disables [HTTP/2](http://nginx.org/en/docs/http/ngx_http_v2_module.html) support in secure connections. - -## gzip-types - -Sets the MIME types in addition to "text/html" to compress. The special value "\*" matches any MIME type. Responses with the "text/html" type are always compressed if `use-gzip` is enabled. - -## worker-processes - -Sets the number of [worker processes](http://nginx.org/en/docs/ngx_core_module.html#worker_processes). -The default of "auto" means number of available CPU cores. - -## worker-shutdown-timeout - -Sets a timeout for Nginx to [wait for worker to gracefully shutdown](http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout). The default is "10s". - -## load-balance - -Sets the algorithm to use for load balancing. -The value can either be: - -- round_robin: to use the default round robin loadbalancer -- least_conn: to use the least connected method -- ip_hash: to use a hash of the server for routing. - -The default is least_conn. - -_References:_ -- http://nginx.org/en/docs/http/load_balancing.html. - -## variables-hash-bucket-size - -Sets the bucket size for the variables hash table. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size - -## variables-hash-max-size - -Sets the maximum size of the variables hash table. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size - -## upstream-keepalive-connections - -Activates the cache for connections to upstream servers. The connections parameter sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this -number is exceeded, the least recently used connections are closed. Default: 32 - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive - -## limit-conn-zone-variable - -Sets parameters for a shared memory zone that will keep states for various keys of [limit_conn_zone](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). The default of "$binary_remote_addr" variable’s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses. - -## proxy-stream-timeout - -Sets the timeout between two successive read or write operations on client or proxied server connections. If no data is transmitted within this time, the connection is closed. 
- -_References:_ -- http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout - -## proxy-stream-responses - -Sets the number of datagrams expected from the proxied server in response to the client request if the UDP protocol is used. - -_References:_ -- http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses - -## bind-address-ipv4 - -Sets the addresses on which the server will accept requests instead of *. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop. - -## bind-address-ipv6 - -Sets the addresses on which the server will accept requests instead of *. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop. - -## forwarded-for-header - -Sets the header field for identifying the originating IP address of a client. Default is X-Forwarded-For - -## compute-full-forwarded-for - -Append the remote address to the X-Forwarded-For header instead of replacing it. When this option is enabled, the upstream application is responsible for extracting the client IP based on its own list of trusted proxies. - -## enable-opentracing - -Enables the nginx Opentracing extension. By default this is disabled. - -_References:_ -- https://github.com/opentracing-contrib/nginx-opentracing - -## zipkin-collector-host - -Specifies the host to use when uploading traces. It must be a valid URL. - -## zipkin-collector-port - -Specifies the port to use when uploading traces. Default: 9411 - -## zipkin-service-name - -Specifies the service name to use for any traces created. Default: nginx - -## http-snippet - -Adds custom configuration to the http section of the nginx configuration. -Default: "" - -## server-snippet - -Adds custom configuration to all the servers in the nginx configuration. -Default: "" - -## location-snippet - -Adds custom configuration to all the locations in the nginx configuration. -Default: "" - -## custom-http-errors - -Enables which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page) - -Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors) which are required to process error_page. - -Example usage: `custom-http-errors: 404,415` - -## proxy-body-size - -Sets the maximum allowed size of the client request body. -See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size). - -## proxy-connect-timeout - -Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds. - -## proxy-read-timeout - -Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response. - -## proxy-send-timeout - -Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request. 
- -## proxy-buffer-size - -Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header. - -## proxy-cookie-path - -Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response. - -## proxy-cookie-domain - -Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response. - -## proxy-next-upstream - -Specifies in [which cases](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) a request should be passed to the next server. - -## proxy-redirect-from - -Sets the original text that should be changed in the "Location" and "Refresh" header fields of a proxied server response. Default: off. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect - -## proxy-request-buffering - -Enables or disables [buffering of a client request body](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering). - -## ssl-redirect - -Sets the global value of redirects (301) to HTTPS if the server has a TLS certificate (defined in an Ingress rule). -Default is "true". - -## whitelist-source-range - -Sets the default whitelisted IPs for each `server` block. This can be overwritten by an annotation on an Ingress rule. -See [ngx_http_access_module](http://nginx.org/en/docs/http/ngx_http_access_module.html). - -## skip-access-log-urls - -Sets a list of URLs that should not appear in the NGINX access log. This is useful with urls like `/health` or `health-check` that make "complex" reading the logs. By default this list is empty - -## limit-rate - -Limits the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate - -## limit-rate-after - -Sets the initial amount after which the further transmission of a response to a client will be rate limited. - -_References:_ -- http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after - -## http-redirect-code - -Sets the HTTP status code to be used in redirects. -Supported codes are [301](https://developer.mozilla.org/es/docs/Web/HTTP/Status/301),[302](https://developer.mozilla.org/es/docs/Web/HTTP/Status/302),[307](https://developer.mozilla.org/es/docs/Web/HTTP/Status/307) and [308](https://developer.mozilla.org/es/docs/Web/HTTP/Status/308) -Default code is 308. - -Why the default code is 308? - -[RFC 7238](https://tools.ietf.org/html/rfc7238) was created to define the 308 (Permanent Redirect) status code that is similar to 301 (Moved Permanently) but it keeps the payload in the redirect. This is important if the we send a redirect in methods like POST. 
diff --git a/docs/user-guide/convert_arguments_to_doc.py b/docs/user-guide/convert_arguments_to_doc.py
new file mode 100755
index 0000000000..9f419672a7
--- /dev/null
+++ b/docs/user-guide/convert_arguments_to_doc.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Convert the output of `nginx-ingress-controller --help` to
+a Markdown table.
+"""
+
+import re
+import sys
+
+assert sys.version_info[0] == 3, 'This script requires Python 3'
+
+data = sys.stdin.read()
+data = data.replace('\t', ' ' * 8)  # Expand tabs
+data = data.replace('\n' + (' ' * 8 * 2), ' ')  # Unwrap lines
+
+print('''
+| Argument | Description |
+|----------|-------------|
+'''.rstrip())
+
+for arg_m in re.finditer(r'^\s+(-.+?)\s{2,}(.+)$', data, flags=re.MULTILINE):
+    arg, description = arg_m.groups()
+    print('| `{arg}` | {description} |'.format(
+        arg=arg.replace(', ', '`, `'),
+        description=description,
+    ))
diff --git a/docs/user-guide/custom-errors.md b/docs/user-guide/custom-errors.md
index 197ff33ade..17fb6f7f7f 100644
--- a/docs/user-guide/custom-errors.md
+++ b/docs/user-guide/custom-errors.md
@@ -1,18 +1,31 @@
 # Custom errors
-In case of an error in a request the body of the response is obtained from the `default backend`.
-Each request to the default backend includes two headers:
+When the [`custom-http-errors`][cm-custom-http-errors] option is enabled, the Ingress controller configures NGINX so
+that it passes several HTTP headers down to its `default-backend` in case of error:
-- `X-Code` indicates the HTTP code to be returned to the client.
-- `X-Format` the value of the `Accept` header.
+| Header | Value |
+| ---------------- | ------------------------------------------------------------------- |
+| `X-Code` | HTTP status code returned by the request |
+| `X-Format` | Value of the `Accept` header sent by the client |
+| `X-Original-URI` | URI that caused the error |
+| `X-Namespace` | Namespace where the backend Service is located |
+| `X-Ingress-Name` | Name of the Ingress where the backend is defined |
+| `X-Service-Name` | Name of the Service backing the backend |
+| `X-Service-Port` | Port number of the Service backing the backend |
+| `X-Request-ID` | Unique ID that identifies the request - same as for backend service |
-**Important:** the custom backend must return the correct HTTP status code to be returned. NGINX do not changes the response from the custom default backend.
+A custom error backend can use this information to return the best possible representation of an error page. For
+example, if the value of the `Accept` header sent by the client was `application/json`, a carefully crafted backend
+could decide to return the error payload as a JSON document instead of HTML.
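+
+As a loose illustration only (the backend shipped in this repository under
+`images/custom-error-pages` is written in Go; the snippet below is a hypothetical
+Python sketch, not the in-tree implementation), a backend honoring these headers
+could look like this:
+
+```python
+# Hypothetical custom error backend: echoes the upstream error code and
+# switches between JSON and HTML based on the X-Format header.
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class ErrorHandler(BaseHTTPRequestHandler):
+    def do_GET(self):
+        code = int(self.headers.get('X-Code', '404'))
+        fmt = self.headers.get('X-Format', 'text/html')
+        uri = self.headers.get('X-Original-URI', '')
+        if 'application/json' in fmt:
+            body = json.dumps({'code': code, 'uri': uri}).encode()
+            ctype = 'application/json'
+        else:
+            body = ('<html><body><h1>Error %d</h1></body></html>' % code).encode()
+            ctype = 'text/html'
+        # Return the original status code, not 200 (see the note below).
+        self.send_response(code)
+        self.send_header('Content-Type', ctype)
+        self.send_header('Content-Length', str(len(body)))
+        self.end_headers()
+        self.wfile.write(body)
+
+if __name__ == '__main__':
+    HTTPServer(('', 8080), ErrorHandler).serve_forever()
+```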
-Using this two headers is possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress-nginx/tree/master/examples/customization/custom-errors/nginx) that inspect each request and returns a custom error page with the format expected by the client. Please check the example [custom-errors](examples/customization/custom-errors/README.md)
+!!! Important
+    The custom backend is expected to return the correct HTTP status code instead of `200`.
+    NGINX does not change the response from the custom default backend.
-NGINX sends additional headers that can be used to build custom response:
+An example of such a custom backend is available inside the source repository at [images/custom-error-pages][img-custom-error-pages].
-- X-Original-URI
-- X-Namespace
-- X-Ingress-Name
-- X-Service-Name
+See also the [Custom errors][example-custom-errors] example.
+
+[cm-custom-http-errors]: ./nginx-configuration/configmap.md#custom-http-errors
+[img-custom-error-pages]: https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages
+[example-custom-errors]: ../examples/customization/custom-errors
diff --git a/docs/user-guide/default-backend.md b/docs/user-guide/default-backend.md
new file mode 100644
index 0000000000..97b30a2b18
--- /dev/null
+++ b/docs/user-guide/default-backend.md
@@ -0,0 +1,17 @@
+# Default backend
+
+The default backend is a service which handles all URL paths and hosts that the nginx controller doesn't understand
+(i.e., all the requests that are not mapped with an Ingress).
+
+Basically, a default backend exposes two URLs:
+
+- `/healthz` that returns 200
+- `/` that returns 404
+
+!!! example
+    The sub-directory [`/images/404-server`](https://github.com/kubernetes/ingress-nginx/tree/master/images/404-server)
+    provides a service which satisfies the requirements for a default backend.
+
+!!! example
+    The sub-directory [`/images/custom-error-pages`](https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages)
+    provides an additional service for the purpose of customizing the error pages served via the default backend.
diff --git a/docs/user-guide/default-ssl-certificate.md b/docs/user-guide/default-ssl-certificate.md
deleted file mode 100644
index 549f672b04..0000000000
--- a/docs/user-guide/default-ssl-certificate.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# Default SSL Certificate
-
-NGINX provides the option to configure a server as a catch-all with [server name _](http://nginx.org/en/docs/http/server_names.html) for requests that do not match any of the configured server names. This configuration works without issues for HTTP traffic.
-In case of HTTPS, NGINX requires a certificate.
-For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned scenario. If this flag is not provided NGINX will use a self signed certificate.
-
-Running without the flag `--default-ssl-certificate`:
-
-```console
-$ curl -v https://10.2.78.7:443 -k
-* Rebuilt URL to: https://10.2.78.7:443/
-* Trying 10.2.78.4...
-* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Thu, 21 Jul 2016 15:38:46 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. - -* Connection #0 to host 10.2.78.7 left intact -``` - -Specifying `--default-ssl-certificate=default/foo-tls`: - -```console -core@localhost ~ $ curl -v https://10.2.78.7:443 -k -* Rebuilt URL to: https://10.2.78.7:443/ -* Trying 10.2.78.7... -* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Mon, 18 Jul 2016 21:02:59 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. 
-
-* Connection #0 to host 10.2.78.7 left intact
-```
diff --git a/docs/user-guide/exposing-tcp-udp-services.md b/docs/user-guide/exposing-tcp-udp-services.md
index 95e0991180..5268f0081b 100644
--- a/docs/user-guide/exposing-tcp-udp-services.md
+++ b/docs/user-guide/exposing-tcp-udp-services.md
@@ -4,7 +4,7 @@ Ingress does not support TCP or UDP services. For this reason this Ingress contr
 `<namespace/service name>:<service port>:[PROXY]:[PROXY]`
 It is also possible to use a number or the name of the port. The two last fields are optional.
-Adding `PROXY` in either or both of the two last fields we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/).
+By adding `PROXY` in either or both of the two last fields, we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service: https://www.nginx.com/resources/admin-guide/proxy-protocol
 
 The next example shows how to expose the service `example-go` running in the namespace `default` in the port `8080` using the port `9000`
@@ -12,7 +12,8 @@ The next example shows how to expose the service `example-go` running in the nam
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: tcp-configmap-example
+  name: tcp-services
+  namespace: ingress-nginx
 data:
   9000: "default/example-go:8080"
 ```
@@ -24,6 +25,39 @@ The next example shows how to expose the service `kube-dns` running in the names
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: udp-configmap-example
+  name: udp-services
+  namespace: ingress-nginx
 data:
   53: "kube-system/kube-dns:53"
+```
+
+If TCP/UDP proxy support is used, then those ports need to be exposed in the Service defined for the Ingress.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx
+  namespace: ingress-nginx
+  labels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+spec:
+  type: LoadBalancer
+  ports:
+    - name: http
+      port: 80
+      targetPort: 80
+      protocol: TCP
+    - name: https
+      port: 443
+      targetPort: 443
+      protocol: TCP
+    - name: proxied-tcp-9000
+      port: 9000
+      targetPort: 9000
+      protocol: TCP
+  selector:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+```
diff --git a/docs/user-guide/ingress-path-matching.md b/docs/user-guide/ingress-path-matching.md
new file mode 100644
index 0000000000..ae1c1b5842
--- /dev/null
+++ b/docs/user-guide/ingress-path-matching.md
@@ -0,0 +1,157 @@
+# Ingress Path Matching
+
+## Regular Expression Support
+
+!!! important
+    Regular expressions and wild cards are not supported in the `spec.rules.host` field. Full hostnames must be used.
+
+The ingress controller supports **case-insensitive** regular expressions in the `spec.rules.http.paths.path` field.
+This can be enabled by setting the `nginx.ingress.kubernetes.io/use-regex` annotation to `true` (the default is false).
+
+See the [description](./nginx-configuration/annotations.md#use-regex) of the `use-regex` annotation for more details.
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: test-ingress
+  annotations:
+    nginx.ingress.kubernetes.io/use-regex: "true"
+spec:
+  rules:
+  - host: test.com
+    http:
+      paths:
+      - path: /foo/.*
+        backend:
+          serviceName: test
+          servicePort: 80
+```
+
+The preceding ingress definition would translate to the following location block within the NGINX configuration for the `test.com` server:
+
+```txt
+location ~* "^/foo/.*" {
+    ...
+}
+```
+
+## Path Priority
+
+In NGINX, regular expressions follow a **first match** policy. In order to enable more accurate path matching, ingress-nginx first orders the paths by descending length before writing them to the NGINX template as location blocks.
+
+**Please read the [warning](#warning) before using regular expressions in your ingress definitions.**
+
+### Example
+
+Let the following two ingress definitions be created:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: test-ingress-1
+spec:
+  rules:
+  - host: test.com
+    http:
+      paths:
+      - path: /foo/bar
+        backend:
+          serviceName: service1
+          servicePort: 80
+      - path: /foo/bar/
+        backend:
+          serviceName: service2
+          servicePort: 80
+```
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: test-ingress-2
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+spec:
+  rules:
+  - host: test.com
+    http:
+      paths:
+      - path: /foo/bar/(.+)
+        backend:
+          serviceName: service3
+          servicePort: 80
+```
+
+The ingress controller would define the following location blocks, in order of descending length, within the NGINX template for the `test.com` server:
+
+```txt
+location ~* ^/foo/bar/.+ {
+    ...
+}
+
+location ~* "^/foo/bar/" {
+    ...
+}
+
+location ~* "^/foo/bar" {
+    ...
+}
+```
+
+The following request URIs would match the corresponding location blocks:
+
+- `test.com/foo/bar/1` matches `~* ^/foo/bar/.+` and will go to service 3.
+- `test.com/foo/bar/` matches `~* ^/foo/bar/` and will go to service 2.
+- `test.com/foo/bar` matches `~* ^/foo/bar` and will go to service 1.
+
+**IMPORTANT NOTES**:
+
+- If the `use-regex` OR `rewrite-target` annotation is used on any Ingress for a given host, then the case insensitive regular expression [location modifier](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) will be enforced on ALL paths for a given host regardless of what Ingress they are defined on.
+
+## Warning
+
+The following example describes a case that may inflict unwanted path matching behaviour.
+
+This case is expected and is a result of NGINX's first match policy for paths that use the regular expression [location modifier](https://nginx.org/en/docs/http/ngx_http_core_module.html#location). For more information about how a path is chosen, please read the following article: ["Understanding Nginx Server and Location Block Selection Algorithms"](https://www.digitalocean.com/community/tutorials/understanding-nginx-server-and-location-block-selection-algorithms).
+
+### Example
+
+Let the following ingress be defined:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: test-ingress-3
+  annotations:
+    nginx.ingress.kubernetes.io/use-regex: "true"
+spec:
+  rules:
+  - host: test.com
+    http:
+      paths:
+      - path: /foo/bar/bar
+        backend:
+          serviceName: test
+          servicePort: 80
+      - path: /foo/bar/[A-Z0-9]{3}
+        backend:
+          serviceName: test
+          servicePort: 80
+```
+
+The ingress controller would define the following location blocks (in this order) within the NGINX template for the `test.com` server:
+
+```txt
+location ~* "^/foo/bar/[A-Z0-9]{3}" {
+    ...
+}
+
+location ~* "^/foo/bar/bar" {
+    ...
+}
+```
+
+A request to `test.com/foo/bar/bar` would match the `^/foo/bar/[A-Z0-9]{3}` location block instead of the longest EXACT matching path.
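+
+If the effective ordering is ever in doubt, one way to check it (a sketch; the pod
+name below is a placeholder for your actual controller pod) is to dump the
+configuration generated by the controller and inspect the location blocks for the
+server in question:
+
+```console
+kubectl exec -n ingress-nginx <nginx-ingress-controller-pod> -- cat /etc/nginx/nginx.conf | grep 'location ~\*'
+```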
diff --git a/docs/user-guide/log-format.md b/docs/user-guide/log-format.md
deleted file mode 100644
index 553ca79975..0000000000
--- a/docs/user-guide/log-format.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Log format
-
-The default configuration uses a custom logging format to add additional information about upstreams, response time and status
-
-```
-    log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
-        '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
-        '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
-```
-
-Sources:
-
-- [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
-- [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)
-
-Description:
-
-- `$proxy_protocol_addr`: if PROXY protocol is enabled
-- `$remote_addr`: if PROXY protocol is disabled (default)
-- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the $remote_addr variable appended to it, separated by a comma
-- `$remote_user`: user name supplied with the Basic authentication
-- `$time_local`: local time in the Common Log Format
-- `$request`: full original request line
-- `$status`: response status
-- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header
-- `$http_referer`: value of the Referer header
-- `$http_user_agent`: value of User-Agent header
-- `$request_length`: request length (including request line, header, and request body)
-- `$request_time`: time elapsed since the first bytes were read from the client
-- `$proxy_upstream_name`: name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>`
-- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas
-- `$upstream_response_length`: keeps the length of the response obtained from the upstream server
-- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution
-- `$upstream_status`: keeps status code of the response obtained from the upstream server
\ No newline at end of file
diff --git a/docs/user-guide/miscellaneous.md b/docs/user-guide/miscellaneous.md
new file mode 100644
index 0000000000..ea4fb21bfd
--- /dev/null
+++ b/docs/user-guide/miscellaneous.md
@@ -0,0 +1,50 @@
+# Miscellaneous
+
+## Source IP address
+
+By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct IP/network address of the trusted external load balancer.
+
+If the ingress controller is running in AWS, we need to use the VPC IPv4 CIDR.
+
+Another option is to enable proxy protocol using `use-proxy-protocol: "true"`.
+
+In this mode NGINX does not use the content of the header to get the source IP address of the connection.
+
+## Proxy Protocol
+
+If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address.
To prevent this, you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic; this will send the connection details before forwarding the actual TCP connection itself.
+
+Amongst others, [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol.
+
+## Websockets
+
+Support for websockets is provided by NGINX out of the box. No special configuration is required.
+
+The only requirement to avoid connections being closed is to increase the values of `proxy-read-timeout` and `proxy-send-timeout`.
+
+The default value of these settings is `60` seconds.
+
+A more adequate value to support websockets is a value higher than one hour (`3600`).
+
+!!! Important
+    If the NGINX ingress controller is exposed with a service `type=LoadBalancer` make sure the protocol between the loadbalancer and NGINX is TCP.
+
+## Optimizing TLS Time To First Byte (TTTFB)
+
+NGINX provides the configuration option [ssl_buffer_size](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) to allow the optimization of the TLS record size.
+
+This improves the [TLS Time To First Byte](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/) (TTTFB).
+The default value in the Ingress controller is `4k` (NGINX default is `16k`).
+
+## Retries in non-idempotent methods
+
+Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error.
+The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap.
+
+## Limitations
+
+- Ingress rules for TLS require the definition of the field `host`
+
+## Why endpoints and not services
+
+The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) to allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.
\ No newline at end of file
diff --git a/docs/user-guide/modsecurity.md b/docs/user-guide/modsecurity.md
deleted file mode 100644
index ddfb9fd4ea..0000000000
--- a/docs/user-guide/modsecurity.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# ModSecurity Web Application Firewall
-
-ModSecurity is an open source, cross platform web application firewall (WAF) engine for Apache, IIS and Nginx that is developed by Trustwave's SpiderLabs. It has a robust event-based programming language which provides protection from a range of attacks against web applications and allows for HTTP traffic monitoring, logging and real-time analysis - https://www.modsecurity.org
-
-The [ModSecurity-nginx](https://github.com/SpiderLabs/ModSecurity-nginx) connector is the connection point between NGINX and libmodsecurity (ModSecurity v3).
-
-The default modsecurity configuration file is located in `/etc/nginx/modsecurity/modsecurity.conf`. This is the only file located in this directory and contains the default recommended configuration. Using a volume we can replace this file with the desired configuration.
-To enable the modsecurity feature we need to specify `enable-modsecurity: "true"` in the configuration configmap.
-
-**NOTE:** the default configuration use detection only, because that minimises the chances of post-installation disruption.
-The file `/var/log/modsec_audit.log` contains the log of modsecurity.
-
-
-The OWASP ModSecurity Core Rule Set (CRS) is a set of generic attack detection rules for use with ModSecurity or compatible web application firewalls. The CRS aims to protect web applications from a wide range of attacks, including the OWASP Top Ten, with a minimum of false alerts.
-The directory `/etc/nginx/owasp-modsecurity-crs` contains the https://github.com/SpiderLabs/owasp-modsecurity-crs repository.
-Using `enable-owasp-modsecurity-crs: "true"` we enable the use of the this rules.
diff --git a/docs/user-guide/monitoring.md b/docs/user-guide/monitoring.md
new file mode 100644
index 0000000000..ed5de73e78
--- /dev/null
+++ b/docs/user-guide/monitoring.md
@@ -0,0 +1,85 @@
+# Prometheus and Grafana installation
+
+This tutorial will show you how to install [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/) for scraping the metrics of the NGINX Ingress controller.
+
+!!! important
+    This example uses `emptyDir` volumes for Prometheus and Grafana. This means that once the pod is terminated you will lose all the data.
+
+## Before You Begin
+
+The NGINX Ingress controller should already be deployed according to the deployment instructions [here](../deploy/index.md).
+
+Note that the kustomize bases used in this tutorial are stored in the [deploy](https://github.com/kubernetes/ingress-nginx/tree/master/deploy) folder of the GitHub repository [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx).
+
+## Deploy and configure Prometheus Server
+
+The Prometheus server must be configured so that it can discover endpoints of services. If a Prometheus server is already running in the cluster and if it is configured in a way that it can find the ingress controller pods, no extra configuration is needed.
+
+If there is no existing Prometheus server running, the rest of this tutorial will guide you through the steps needed to deploy a properly configured Prometheus server.
+
+Running the following command deploys Prometheus in Kubernetes:
+
+```console
+kubectl apply --kustomize github.com/kubernetes/ingress-nginx/deploy/prometheus/
+serviceaccount/prometheus-server created
+role.rbac.authorization.k8s.io/prometheus-server created
+rolebinding.rbac.authorization.k8s.io/prometheus-server created
+configmap/prometheus-configuration-bc6bcg7b65 created
+service/prometheus-server created
+deployment.apps/prometheus-server created
+```
+
+### Prometheus Dashboard
+
+Open the Prometheus dashboard in a web browser:
+
+```console
+kubectl get svc -n ingress-nginx
+NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                      AGE
+default-http-backend   ClusterIP   10.103.59.201   <none>        80/TCP                                       3d
+ingress-nginx          NodePort    10.97.44.72     <none>        80:30100/TCP,443:30154/TCP,10254:32049/TCP   5h
+prometheus-server      NodePort    10.98.233.86    <none>        9090:32630/TCP                               1m
+```
+
+Obtain the IP address of the nodes in the running cluster:
+
+```console
+kubectl get nodes -o wide
+```
+
+In some cases, where the nodes only have internal IP addresses, we need to execute:
+
+```console
+kubectl get nodes --selector=kubernetes.io/role!=master -o jsonpath={.items[*].status.addresses[?\(@.type==\"InternalIP\"\)].address}
+10.192.0.2 10.192.0.3 10.192.0.4
+```
+
+Open your browser and visit the following URL: _http://{node IP address}:{prometheus-svc-nodeport}_ to load the Prometheus Dashboard.
+ + According to the above example, this URL will be http://10.192.0.3:32630 + +![Dashboard](../images/prometheus-dashboard.png) + +### Grafana + +```console +kubectl apply --kustomize github.com/kubernetes/ingress-nginx/deploy/grafana/ +``` + +```console +kubectl get svc -n ingress-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default-http-backend ClusterIP 10.103.59.201 80/TCP 3d +ingress-nginx NodePort 10.97.44.72 80:30100/TCP,443:30154/TCP,10254:32049/TCP 5h +prometheus-server NodePort 10.98.233.86 9090:32630/TCP 10m +grafana NodePort 10.98.233.87 3000:31086/TCP 10m +``` + +Open your browser and visit the following URL: _http://{node IP address}:{grafana-svc-nodeport}_ to load the Grafana Dashboard. +According to the above example, this URL will be http://10.192.0.3:31086 + +The username and password are `admin`. + +After logging in, you can import the Grafana dashboard from _https://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards_ + +![Dashboard](../images/grafana.png) diff --git a/docs/user-guide/multiple-ingress.md b/docs/user-guide/multiple-ingress.md new file mode 100644 index 0000000000..7a5acf830b --- /dev/null +++ b/docs/user-guide/multiple-ingress.md @@ -0,0 +1,56 @@ +# Multiple Ingress controllers + +If you're running multiple ingress controllers, or running on a cloud provider that natively handles ingress such as GKE, +you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses that you would like the ingress-nginx controller to claim. + + +For instance, + +```yaml +metadata: + name: foo + annotations: + kubernetes.io/ingress.class: "gce" +``` + +will target the GCE controller, forcing the nginx controller to ignore it, while an annotation like + +```yaml +metadata: + name: foo + annotations: + kubernetes.io/ingress.class: "nginx" +``` + +will target the nginx controller, forcing the GCE controller to ignore it. + +To reiterate, setting the annotation to any value which does not match a valid ingress class will force the NGINX Ingress controller to ignore your Ingress. +If you are only running a single NGINX ingress controller, this can be achieved by setting the annotation to any value except "nginx" or an empty string. + +Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller. + +## Multiple ingress-nginx controllers + +This mechanism also provides users the ability to run _multiple_ NGINX ingress controllers (e.g. one which serves public traffic, one which serves "internal" traffic). +To do this, the option `--ingress-class` must be changed to a value unique for the cluster within the definition of the replication controller. +Here is a partial example: + +```yaml +spec: + template: + spec: + containers: + - name: nginx-ingress-internal-controller + args: + - /nginx-ingress-controller + - '--election-id=ingress-controller-leader-internal' + - '--ingress-class=nginx-internal' + - '--configmap=ingress/nginx-ingress-internal-controller' +``` + +!!! important + Deploying multiple Ingress controllers, of different types (e.g., `ingress-nginx` & `gce`), and not specifying a class annotation will + result in both or all controllers fighting to satisfy the Ingress, and all of them racing to update the Ingress status field in confusing ways.
+ + When running multiple ingress-nginx controllers, it will only process an unset class annotation if one of the controllers uses the default +`--ingress-class` value (see `IsValid` method in `internal/ingress/annotations/class/main.go`), otherwise the class annotation becomes required. diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md new file mode 100755 index 0000000000..0a359b4967 --- /dev/null +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -0,0 +1,783 @@ +# Annotations + +You can add these Kubernetes annotations to specific Ingress objects to customize their behavior. + +!!! tip + Annotation keys and values can only be strings. + Other types, such as boolean or numeric values must be quoted, + i.e. `"true"`, `"false"`, `"100"`. + +!!! note + The annotation prefix can be changed using the + [`--annotations-prefix` command line argument](../cli-arguments.md), + but the default is `nginx.ingress.kubernetes.io`, as described in the + table below. + +|Name | type | +|---------------------------|------| +|[nginx.ingress.kubernetes.io/app-root](#rewrite)|string| +|[nginx.ingress.kubernetes.io/affinity](#session-affinity)|cookie| +|[nginx.ingress.kubernetes.io/auth-realm](#authentication)|string| +|[nginx.ingress.kubernetes.io/auth-secret](#authentication)|string| +|[nginx.ingress.kubernetes.io/auth-type](#authentication)|basic or digest| +|[nginx.ingress.kubernetes.io/auth-tls-secret](#client-certificate-authentication)|string| +|[nginx.ingress.kubernetes.io/auth-tls-verify-depth](#client-certificate-authentication)|number| +|[nginx.ingress.kubernetes.io/auth-tls-verify-client](#client-certificate-authentication)|string| +|[nginx.ingress.kubernetes.io/auth-tls-error-page](#client-certificate-authentication)|string| +|[nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream](#client-certificate-authentication)|"true" or "false"| +|[nginx.ingress.kubernetes.io/auth-url](#external-authentication)|string| +|[nginx.ingress.kubernetes.io/auth-snippet](#external-authentication)|string| +|[nginx.ingress.kubernetes.io/enable-global-auth](#external-authentication)|"true" or "false"| +|[nginx.ingress.kubernetes.io/backend-protocol](#backend-protocol)|string| +|[nginx.ingress.kubernetes.io/canary](#canary)|"true" or "false"| +|[nginx.ingress.kubernetes.io/canary-by-header](#canary)|string| +|[nginx.ingress.kubernetes.io/canary-by-header-value](#canary)|string| +|[nginx.ingress.kubernetes.io/canary-by-cookie](#canary)|string| +|[nginx.ingress.kubernetes.io/canary-weight](#canary)|number| +|[nginx.ingress.kubernetes.io/client-body-buffer-size](#client-body-buffer-size)|string| +|[nginx.ingress.kubernetes.io/configuration-snippet](#configuration-snippet)|string| +|[nginx.ingress.kubernetes.io/custom-http-errors](#custom-http-errors)|[]int| +|[nginx.ingress.kubernetes.io/default-backend](#default-backend)|string| +|[nginx.ingress.kubernetes.io/enable-cors](#enable-cors)|"true" or "false"| +|[nginx.ingress.kubernetes.io/cors-allow-origin](#enable-cors)|string| +|[nginx.ingress.kubernetes.io/cors-allow-methods](#enable-cors)|string| +|[nginx.ingress.kubernetes.io/cors-allow-headers](#enable-cors)|string| +|[nginx.ingress.kubernetes.io/cors-allow-credentials](#enable-cors)|"true" or "false"| +|[nginx.ingress.kubernetes.io/cors-max-age](#enable-cors)|number| +|[nginx.ingress.kubernetes.io/force-ssl-redirect](#server-side-https-enforcement-through-redirect)|"true" or "false"|
+|[nginx.ingress.kubernetes.io/from-to-www-redirect](#redirect-from-to-www)|"true" or "false"| +|[nginx.ingress.kubernetes.io/http2-push-preload](#http2-push-preload)|"true" or "false"| +|[nginx.ingress.kubernetes.io/limit-connections](#rate-limiting)|number| +|[nginx.ingress.kubernetes.io/limit-rps](#rate-limiting)|number| +|[nginx.ingress.kubernetes.io/permanent-redirect](#permanent-redirect)|string| +|[nginx.ingress.kubernetes.io/permanent-redirect-code](#permanent-redirect-code)|number| +|[nginx.ingress.kubernetes.io/temporal-redirect](#temporal-redirect)|string| +|[nginx.ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string| +|[nginx.ingress.kubernetes.io/proxy-cookie-domain](#proxy-cookie-domain)|string| +|[nginx.ingress.kubernetes.io/proxy-cookie-path](#proxy-cookie-path)|string| +|[nginx.ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number| +|[nginx.ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number| +|[nginx.ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number| +|[nginx.ingress.kubernetes.io/proxy-next-upstream](#custom-timeouts)|string| +|[nginx.ingress.kubernetes.io/proxy-next-upstream-timeout](#custom-timeouts)|number| +|[nginx.ingress.kubernetes.io/proxy-next-upstream-tries](#custom-timeouts)|number| +|[nginx.ingress.kubernetes.io/proxy-request-buffering](#custom-timeouts)|string| +|[nginx.ingress.kubernetes.io/proxy-redirect-from](#proxy-redirect)|string| +|[nginx.ingress.kubernetes.io/proxy-redirect-to](#proxy-redirect)|string| +|[nginx.ingress.kubernetes.io/enable-rewrite-log](#enable-rewrite-log)|"true" or "false"| +|[nginx.ingress.kubernetes.io/rewrite-target](#rewrite)|URI| +|[nginx.ingress.kubernetes.io/satisfy](#satisfy)|string| +|[nginx.ingress.kubernetes.io/secure-verify-ca-secret](#secure-backends)|string| +|[nginx.ingress.kubernetes.io/server-alias](#server-alias)|string| +|[nginx.ingress.kubernetes.io/server-snippet](#server-snippet)|string| +|[nginx.ingress.kubernetes.io/service-upstream](#service-upstream)|"true" or "false"| +|[nginx.ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string| +|[nginx.ingress.kubernetes.io/session-cookie-path](#cookie-affinity)|string| +|[nginx.ingress.kubernetes.io/session-cookie-change-on-failure](#cookie-affinity)|"true" or "false"| +|[nginx.ingress.kubernetes.io/ssl-redirect](#server-side-https-enforcement-through-redirect)|"true" or "false"| +|[nginx.ingress.kubernetes.io/ssl-passthrough](#ssl-passthrough)|"true" or "false"| +|[nginx.ingress.kubernetes.io/upstream-hash-by](#custom-nginx-upstream-hashing)|string| +|[nginx.ingress.kubernetes.io/x-forwarded-prefix](#x-forwarded-prefix-header)|string| +|[nginx.ingress.kubernetes.io/load-balance](#custom-nginx-load-balancing)|string| +|[nginx.ingress.kubernetes.io/upstream-vhost](#custom-nginx-upstream-vhost)|string| +|[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| +|[nginx.ingress.kubernetes.io/proxy-buffering](#proxy-buffering)|string| +|[nginx.ingress.kubernetes.io/proxy-buffers-number](#proxy-buffers-number)|number| +|[nginx.ingress.kubernetes.io/proxy-buffer-size](#proxy-buffer-size)|string| +|[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string| +|[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string| +|[nginx.ingress.kubernetes.io/enable-access-log](#enable-access-log)|"true" or "false"| +|[nginx.ingress.kubernetes.io/lua-resty-waf](#lua-resty-waf)|string| 
+|[nginx.ingress.kubernetes.io/lua-resty-waf-debug](#lua-resty-waf)|"true" or "false"| +|[nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets](#lua-resty-waf)|string| +|[nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules](#lua-resty-waf)|string| +|[nginx.ingress.kubernetes.io/lua-resty-waf-allow-unknown-content-types](#lua-resty-waf)|"true" or "false"| +|[nginx.ingress.kubernetes.io/lua-resty-waf-score-threshold](#lua-resty-waf)|number| +|[nginx.ingress.kubernetes.io/lua-resty-waf-process-multipart-body](#lua-resty-waf)|"true" or "false"| +|[nginx.ingress.kubernetes.io/enable-influxdb](#influxdb)|"true" or "false"| +|[nginx.ingress.kubernetes.io/influxdb-measurement](#influxdb)|string| +|[nginx.ingress.kubernetes.io/influxdb-port](#influxdb)|string| +|[nginx.ingress.kubernetes.io/influxdb-host](#influxdb)|string| +|[nginx.ingress.kubernetes.io/influxdb-server-name](#influxdb)|string| +|[nginx.ingress.kubernetes.io/use-regex](#use-regex)|bool| +|[nginx.ingress.kubernetes.io/enable-modsecurity](#modsecurity)|bool| +|[nginx.ingress.kubernetes.io/enable-owasp-core-rules](#modsecurity)|bool| +|[nginx.ingress.kubernetes.io/modsecurity-transaction-id](#modsecurity)|string| +|[nginx.ingress.kubernetes.io/modsecurity-snippet](#modsecurity)|string| + +### Canary + +In some cases, you may want to "canary" a new set of changes by sending a small number of requests to a different service than the production service. The canary annotation enables the Ingress spec to act as an alternative service for requests to route to depending on the rules applied. The following annotations to configure canary can be enabled after `nginx.ingress.kubernetes.io/canary: "true"` is set: + +* `nginx.ingress.kubernetes.io/canary-by-header`: The header to use for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the request header is set to `always`, it will be routed to the canary. When the header is set to `never`, it will never be routed to the canary. For any other value, the header will be ignored and the request compared against the other canary rules by precedence. + +* `nginx.ingress.kubernetes.io/canary-by-header-value`: The header value to match for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the request header is set to this value, it will be routed to the canary. For any other header value, the header will be ignored and the request compared against the other canary rules by precedence. This annotation has to be used together with `nginx.ingress.kubernetes.io/canary-by-header`. The annotation is an extension of `nginx.ingress.kubernetes.io/canary-by-header` to allow customizing the header value instead of using hardcoded values. It doesn't have any effect if the `nginx.ingress.kubernetes.io/canary-by-header` annotation is not defined. + +* `nginx.ingress.kubernetes.io/canary-by-cookie`: The cookie to use for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the cookie value is set to `always`, it will be routed to the canary. When the cookie is set to `never`, it will never be routed to the canary. For any other value, the cookie will be ignored and the request compared against the other canary rules by precedence. + +* `nginx.ingress.kubernetes.io/canary-weight`: The integer-based (0 - 100) percent of random requests that should be routed to the service specified in the canary Ingress. A weight of 0 implies that no requests will be sent to the service in the Canary ingress by this canary rule.
A weight of 100 implies that all requests will be sent to the alternative service specified in the Ingress. + +Canary rules are evaluated in order of precedence. Precedence is as follows: +`canary-by-header -> canary-by-cookie -> canary-weight` + +**Note** that when you mark an ingress as canary, then all the other non-canary annotations will be ignored (inherited from the corresponding main ingress) except `nginx.ingress.kubernetes.io/load-balance` and `nginx.ingress.kubernetes.io/upstream-hash-by`. + +**Known Limitations** + +Currently a maximum of one canary ingress can be applied per Ingress rule. + +### Rewrite + +In some scenarios the exposed URL in the backend service differs from the specified path in the Ingress rule. Without a rewrite any request will return 404. +Set the annotation `nginx.ingress.kubernetes.io/rewrite-target` to the path expected by the service. + +If the Application Root is exposed in a different path and needs to be redirected, set the annotation `nginx.ingress.kubernetes.io/app-root` to redirect requests for `/`. + +!!! example + Please check the [rewrite](../../examples/rewrite/README.md) example. + +### Session Affinity + +The annotation `nginx.ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server. +The only affinity type available for NGINX is `cookie`. + +!!! attention + If more than one Ingress is defined for a host and at least one Ingress uses `nginx.ingress.kubernetes.io/affinity: cookie`, then only paths on the Ingress using `nginx.ingress.kubernetes.io/affinity` will use session cookie affinity. All paths defined on other Ingresses for the host will be load balanced through the random selection of a backend server. + +!!! example + Please check the [affinity](../../examples/affinity/cookie/README.md) example. + +#### Cookie affinity + +If you use the `cookie` affinity type you can also specify the name of the cookie that will be used to route the requests with the annotation `nginx.ingress.kubernetes.io/session-cookie-name`. The default is to create a cookie named 'INGRESSCOOKIE'. + +The NGINX annotation `nginx.ingress.kubernetes.io/session-cookie-path` defines the path that will be set on the cookie. This is optional unless the annotation `nginx.ingress.kubernetes.io/use-regex` is set to true; Session cookie paths do not support regex. + + +### Authentication + +It is possible to add authentication by adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords inside the key `auth`. + +The annotations are: +``` +nginx.ingress.kubernetes.io/auth-type: [basic|digest] +``` + +Indicates the [HTTP Authentication Type: Basic or Digest Access Authentication](https://tools.ietf.org/html/rfc2617). + +``` +nginx.ingress.kubernetes.io/auth-secret: secretName +``` + +The name of the Secret that contains the usernames and passwords which are granted access to the `path`s defined in the Ingress rules. +This annotation also accepts the alternative form "namespace/secretName", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace. + +``` +nginx.ingress.kubernetes.io/auth-realm: "realm string" +``` + +!!! example
 Please check the [auth](../../examples/auth/basic/README.md) example.
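+
+For illustration, here is a minimal sketch of an Ingress combining these annotations. The Secret name `basic-auth`, the host `foo.bar.com` and the backend service `http-svc` are placeholders, and the Secret is assumed to already contain an `auth` key (e.g. generated with `htpasswd`):
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: ingress-with-auth
+  annotations:
+    # type of authentication
+    nginx.ingress.kubernetes.io/auth-type: basic
+    # name of the Secret that contains the user/password definitions
+    nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    # message shown when authentication is requested
+    nginx.ingress.kubernetes.io/auth-realm: "Authentication Required"
+spec:
+  rules:
+  - host: foo.bar.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: http-svc
+          servicePort: 80
+```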
+ +### Custom NGINX upstream hashing + +NGINX supports load balancing by client-server mapping based on [consistent hashing](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash) for a given key. The key can contain text, variables or any combination thereof. This feature allows for request stickiness other than client IP or cookies. The [ketama](http://www.last.fm/user/RJ/journal/2007/04/10/392555/) consistent hashing method will be used, which ensures that only a few keys would be remapped to different servers on upstream group changes. + +There is a special mode of upstream hashing called subset. In this mode, upstream servers are grouped into subsets, and stickiness works by mapping keys to a subset instead of individual upstream servers. A specific server is then chosen uniformly at random from the selected sticky subset. It provides a balance between stickiness and load distribution. + +To enable consistent hashing for a backend: + +`nginx.ingress.kubernetes.io/upstream-hash-by`: the nginx variable, text value or any combination thereof to use for consistent hashing. For example, `nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"` to consistently hash upstream requests by the current request URI. + +"Subset" hashing can be enabled by setting `nginx.ingress.kubernetes.io/upstream-hash-by-subset: "true"`. This maps requests to a subset of nodes instead of a single one. `upstream-hash-by-subset-size` determines the size of each subset (default 3). + +Please check the [chashsubset](../../examples/chashsubset/deployment.yaml) example. + +### Custom NGINX load balancing + +This is similar to [`load-balance` in ConfigMap](./configmap.md#load-balance), but configures the load balancing algorithm per ingress. +>Note that `nginx.ingress.kubernetes.io/upstream-hash-by` takes precedence over this. If neither this nor `nginx.ingress.kubernetes.io/upstream-hash-by` is set, we fall back to the globally configured load balancing algorithm. + +### Custom NGINX upstream vhost + +This configuration setting allows you to control the value for host in the following statement: `proxy_set_header Host $host`, which forms part of the location block. This is useful if you need to call the upstream server by something other than `$host`. + +### Client Certificate Authentication + +It is possible to enable Client Certificate Authentication using additional annotations in the Ingress rule. + +The annotations are: + +* `nginx.ingress.kubernetes.io/auth-tls-secret: secretName`: + The name of the Secret that contains the full Certificate Authority chain `ca.crt` that is enabled to authenticate against this Ingress. + This annotation also accepts the alternative form "namespace/secretName", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace. +* `nginx.ingress.kubernetes.io/auth-tls-verify-depth`: + The validation depth between the provided client certificate and the Certification Authority chain. +* `nginx.ingress.kubernetes.io/auth-tls-verify-client`: + Enables verification of client certificates. +* `nginx.ingress.kubernetes.io/auth-tls-error-page`: + The URL/Page that the user should be redirected to in case of a Certificate Authentication Error. +* `nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream`: + Indicates if the received certificates should be passed or not to the upstream server. By default this is disabled. + +!!! example + Please check the [client-certs](../../examples/auth/client-certs/README.md) example. + +!!!
attention + TLS with Client Authentication is **not** possible in Cloudflare and might result in unexpected behavior. + + Cloudflare only allows Authenticated Origin Pulls and requires using their own certificate: [https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/](https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/) + + Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: [https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls](https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls) + + +### Configuration snippet + +Using this annotation you can add additional configuration to the NGINX location. For example: + +```yaml +nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "Request-Id: $req_id"; +``` + +### Custom HTTP Errors + +Like the [`custom-http-errors`](./configmap.md#custom-http-errors) value in the ConfigMap, this annotation will set the NGINX `proxy_intercept_errors` directive, but only for the NGINX location associated with this ingress. If a [default backend annotation](#default-backend) is specified on the ingress, the errors will be routed to that annotation's default backend service (instead of the global default backend). +Different ingresses can specify different sets of error codes. Even if multiple ingress objects share the same hostname, this annotation can be used to intercept different error codes for each ingress (for example, different error codes to be intercepted for different paths on the same hostname, if each path is on a different ingress). +If `custom-http-errors` is also specified globally, the error values specified in this annotation will override the global value for the given ingress' hostname and path. + +Example usage: +``` +nginx.ingress.kubernetes.io/custom-http-errors: "404,415" +``` + +### Default Backend + +This annotation is of the form `nginx.ingress.kubernetes.io/default-backend: <svc name>` to specify a custom default backend. This `<svc name>` is a reference to a service inside of the same namespace in which you are applying this annotation. This annotation overrides the global default backend. + +This service will handle the response when the service in the Ingress rule does not have active endpoints. It will also handle the error responses if both this annotation and the [custom-http-errors annotation](#custom-http-errors) are set. + +### Enable CORS + +To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule, add the annotation +`nginx.ingress.kubernetes.io/enable-cors: "true"`. This will add a section in the server +location enabling this functionality. + +CORS can be controlled with the following annotations: + +* `nginx.ingress.kubernetes.io/cors-allow-methods` + controls which methods are accepted. This is a multi-valued field, separated by ',' and + accepts only letters (upper and lower case). + - Default: `GET, PUT, POST, DELETE, PATCH, OPTIONS` + - Example: `nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS"` + +* `nginx.ingress.kubernetes.io/cors-allow-headers` + controls which headers are accepted. This is a multi-valued field, separated by ',' and accepts letters, + numbers, _ and -.
+ - Default: `DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization` + - Example: `nginx.ingress.kubernetes.io/cors-allow-headers: "X-Forwarded-For, X-app123-XPTO"` + +* `nginx.ingress.kubernetes.io/cors-allow-origin` + controls the accepted Origin for CORS. + This is a single field value, with the following format: `http(s)://origin-site.com` or `http(s)://origin-site.com:port` + - Default: `*` + - Example: `nginx.ingress.kubernetes.io/cors-allow-origin: "https://origin-site.com:4443"` + +* `nginx.ingress.kubernetes.io/cors-allow-credentials` + controls if credentials can be passed during CORS operations. + - Default: `true` + - Example: `nginx.ingress.kubernetes.io/cors-allow-credentials: "false"` + +* `nginx.ingress.kubernetes.io/cors-max-age` + controls how long preflight requests can be cached. + - Default: `1728000` + - Example: `nginx.ingress.kubernetes.io/cors-max-age: 600` + +!!! note + For more information please see [https://enable-cors.org](https://enable-cors.org/server_nginx.html) + +### HTTP2 Push Preload + +Enables automatic conversion of preload links specified in the “Link” response header fields into push requests. + +!!! example + + * `nginx.ingress.kubernetes.io/http2-push-preload: "true"` + +### Server Alias + +To add Server Aliases to an Ingress rule add the annotation `nginx.ingress.kubernetes.io/server-alias: "<alias>"`. +This will create a server with the same configuration, but a different `server_name` as the provided host. + +!!! Note + A server-alias name cannot conflict with the hostname of an existing server. If it does, the server-alias annotation will be ignored. + If a server-alias is created and later a new server with the same hostname is created, + the new server configuration will take precedence over the alias configuration. + +For more information please see [the `server_name` documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name). + +### Server snippet + +Using the annotation `nginx.ingress.kubernetes.io/server-snippet` it is possible to add custom configuration in the server configuration block. + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/server-snippet: | + set $agentflag 0; + + if ($http_user_agent ~* "(Mobile)" ){ + set $agentflag 1; + } + + if ( $agentflag = 1 ) { + return 301 https://m.example.com; + } +``` + +!!! attention + This annotation can be used only once per host. + +### Client Body Buffer Size + +Sets the buffer size for reading the client request body per location. In case the request body is larger than the buffer, +the whole body or only its part is written to a temporary file. By default, the buffer size is equal to two memory pages. +This is 8K on x86, other 32-bit platforms, and x86-64. It is usually 16K on other 64-bit platforms. This annotation is +applied to each location provided in the ingress rule. + +!!! note + The annotation value must be given in a format understood by Nginx. + +!!!
example + + * `nginx.ingress.kubernetes.io/client-body-buffer-size: "1000"` # 1000 bytes + * `nginx.ingress.kubernetes.io/client-body-buffer-size: 1k` # 1 kilobyte + * `nginx.ingress.kubernetes.io/client-body-buffer-size: 1K` # 1 kilobyte + * `nginx.ingress.kubernetes.io/client-body-buffer-size: 1m` # 1 megabyte + * `nginx.ingress.kubernetes.io/client-body-buffer-size: 1M` # 1 megabyte + +For more information please see [http://nginx.org](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size) + +### External Authentication + +To use an existing service that provides authentication the Ingress rule can be annotated with `nginx.ingress.kubernetes.io/auth-url` to indicate the URL where the HTTP request should be sent. + +```yaml +nginx.ingress.kubernetes.io/auth-url: "URL to the authentication service" +``` + +Additionally it is possible to set: + +* `nginx.ingress.kubernetes.io/auth-method`: + `<Method>` to specify the HTTP method to use. +* `nginx.ingress.kubernetes.io/auth-signin`: + `<SignIn_URL>` to specify the location of the error page. +* `nginx.ingress.kubernetes.io/auth-response-headers`: + `<Response_Header_1, ..., Response_Header_n>` to specify headers to pass to backend once authentication request completes. +* `nginx.ingress.kubernetes.io/auth-request-redirect`: + `<Request_Redirect_URL>` to specify the X-Auth-Request-Redirect header value. +* `nginx.ingress.kubernetes.io/auth-snippet`: + `<Auth_Snippet>` to specify a custom snippet to use with external authentication, e.g. + +```yaml +nginx.ingress.kubernetes.io/auth-url: http://foo.com/external-auth +nginx.ingress.kubernetes.io/auth-snippet: | + proxy_set_header Foo-Header 42; +``` +> Note: `nginx.ingress.kubernetes.io/auth-snippet` is an optional annotation. However, it may only be used in conjunction with `nginx.ingress.kubernetes.io/auth-url` and will be ignored if `nginx.ingress.kubernetes.io/auth-url` is not set. + +!!! example + Please check the [external-auth](../../examples/auth/external-auth/README.md) example. + +#### Global External Authentication + +By default the controller redirects all requests to an existing service that provides authentication if `global-auth-url` is set in the NGINX ConfigMap. If you want to disable this behavior for that ingress, you can use `enable-global-auth: "false"` in the NGINX ConfigMap. +`nginx.ingress.kubernetes.io/enable-global-auth`: + indicates if GlobalExternalAuth configuration should be applied or not to this Ingress rule. The default value is `"true"`. + +!!! note + For more information please see [global-auth-url](./configmap.md#global-auth-url). + +### Rate limiting + +These annotations define limits on the connections that can be opened and the requests that can be made by a single client IP address. +This can be used to mitigate [DDoS Attacks](https://www.nginx.com/blog/mitigating-ddos-attacks-with-nginx-and-nginx-plus). + +* `nginx.ingress.kubernetes.io/limit-connections`: number of concurrent connections allowed from a single IP address. +* `nginx.ingress.kubernetes.io/limit-rps`: number of requests that may be accepted from a given IP each second. +* `nginx.ingress.kubernetes.io/limit-rpm`: number of requests that may be accepted from a given IP each minute. +* `nginx.ingress.kubernetes.io/limit-rate-after`: sets the initial amount after which the further transmission of a response to a client will be rate limited. +* `nginx.ingress.kubernetes.io/limit-rate`: rate, in bytes per second, at which a response is transmitted to a client. + +You can specify the client IP source ranges to be excluded from rate-limiting through the `nginx.ingress.kubernetes.io/limit-whitelist` annotation.
The value is a comma separated list of CIDRs. + +If you specify multiple rate-limiting annotations in a single Ingress rule, `limit-rpm` takes precedence over `limit-rps`. + +The annotations `nginx.ingress.kubernetes.io/limit-rate` and `nginx.ingress.kubernetes.io/limit-rate-after` define a limit on the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit. + +To configure this setting globally for all Ingress rules, the `limit-rate-after` and `limit-rate` values may be set in the [NGINX ConfigMap](./configmap.md#limit-rate). A value set in an Ingress annotation overrides the global setting. + +### Permanent Redirect + +This annotation allows you to return a permanent redirect instead of sending data to the upstream. For example `nginx.ingress.kubernetes.io/permanent-redirect: https://www.google.com` would redirect everything to Google. + +### Permanent Redirect Code + +This annotation allows you to modify the status code used for permanent redirects. For example `nginx.ingress.kubernetes.io/permanent-redirect-code: '308'` would return your permanent-redirect with a 308. + +### Temporal Redirect +This annotation allows you to return a temporal redirect (Return Code 302) instead of sending data to the upstream. For example `nginx.ingress.kubernetes.io/temporal-redirect: https://www.google.com` would redirect everything to Google with a Return Code of 302 (Moved Temporarily). + +### SSL Passthrough + +The annotation `nginx.ingress.kubernetes.io/ssl-passthrough` instructs the controller to send TLS connections directly +to the backend instead of letting NGINX decrypt the communication. See also [TLS/HTTPS](../tls.md#ssl-passthrough) in +the User guide. + +!!! note + SSL Passthrough is **disabled by default** and requires starting the controller with the + [`--enable-ssl-passthrough`](../cli-arguments.md) flag. + +!!! attention + Because SSL Passthrough works on layer 4 of the OSI model (TCP) and not on layer 7 (HTTP), using SSL Passthrough + invalidates all the other annotations set on an Ingress object. + +### Service Upstream + +By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. + +The `nginx.ingress.kubernetes.io/service-upstream` annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. + +This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue [#257](https://github.com/kubernetes/ingress-nginx/issues/257). + +#### Known Issues + +If the `service-upstream` annotation is specified the following things should be taken into consideration: + +* Sticky Sessions will not work as only round-robin load balancing is supported. +* The `proxy_next_upstream` directive will not have any effect, meaning that on error the request will not be dispatched to another upstream. + +### Server-side HTTPS enforcement through redirect + +By default the controller redirects (308) to HTTPS if TLS is enabled for that ingress. +If you want to disable this behavior globally, you can use `ssl-redirect: "false"` in the NGINX [ConfigMap](./configmap.md#ssl-redirect).
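+
+As a sketch, disabling the redirect globally could look like this in the controller's ConfigMap (the ConfigMap name and namespace below are placeholders; use the ones referenced by your controller's `--configmap` flag):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-configuration   # placeholder
+  namespace: ingress-nginx    # placeholder
+data:
+  ssl-redirect: "false"
+```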
+ + To configure this feature for specific ingress resources, you can use the `nginx.ingress.kubernetes.io/ssl-redirect: "false"` +annotation in the particular resource. + +When using SSL offloading outside of the cluster (e.g. AWS ELB) it may be useful to enforce a redirect to HTTPS +even when there is no TLS certificate available. +This can be achieved by using the `nginx.ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource. + +### Redirect from/to www + +In some scenarios it is required to redirect from `www.domain.com` to `domain.com` or vice versa. +To enable this feature use the annotation `nginx.ingress.kubernetes.io/from-to-www-redirect: "true"`. + +!!! attention + If at some point a new Ingress is created with a host equal to one of the options (like `domain.com`) the annotation will be omitted. + +!!! attention + For HTTPS to HTTPS redirects, it is mandatory that the SSL Certificate defined in the Secret, located in the TLS section of the Ingress, contains both FQDNs in the common name of the certificate. + +### Whitelist source range + +You can specify allowed client IP source ranges through the `nginx.ingress.kubernetes.io/whitelist-source-range` annotation. +The value is a comma separated list of [CIDRs](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing), e.g. `10.0.0.0/24,172.10.0.1`. + +To configure this setting globally for all Ingress rules, the `whitelist-source-range` value may be set in the [NGINX ConfigMap](./configmap.md#whitelist-source-range). + +!!! note + Adding an annotation to an Ingress rule overrides any global restriction. + +### Custom timeouts + +Using the configuration configmap it is possible to set the default global timeout for connections to the upstream servers. +In some scenarios it is required to have different values. To allow this, we provide annotations that allow this customization: + +- `nginx.ingress.kubernetes.io/proxy-connect-timeout` +- `nginx.ingress.kubernetes.io/proxy-send-timeout` +- `nginx.ingress.kubernetes.io/proxy-read-timeout` +- `nginx.ingress.kubernetes.io/proxy-next-upstream` +- `nginx.ingress.kubernetes.io/proxy-next-upstream-timeout` +- `nginx.ingress.kubernetes.io/proxy-next-upstream-tries` +- `nginx.ingress.kubernetes.io/proxy-request-buffering` + +### Proxy redirect + +With the annotations `nginx.ingress.kubernetes.io/proxy-redirect-from` and `nginx.ingress.kubernetes.io/proxy-redirect-to` it is possible to +set the text that should be changed in the `Location` and `Refresh` header fields of a [proxied server response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect). + +Setting "off" or "default" in the annotation `nginx.ingress.kubernetes.io/proxy-redirect-from` disables `nginx.ingress.kubernetes.io/proxy-redirect-to`, +otherwise, both annotations must be used in unison. Note that each annotation must be a string without spaces. + +By default the value of each annotation is "off". + +### Custom max body size + +For NGINX, a 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body. This size can be configured by the parameter [`client_max_body_size`](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size). + +To configure this setting globally for all Ingress rules, the `proxy-body-size` value may be set in the [NGINX ConfigMap](./configmap.md#proxy-body-size).
+To use custom values in an Ingress rule, define this annotation: + +```yaml +nginx.ingress.kubernetes.io/proxy-body-size: 8m +``` + +### Proxy cookie domain + +Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the "Set-Cookie" header fields of a proxied server response. + +To configure this setting globally for all Ingress rules, the `proxy-cookie-domain` value may be set in the [NGINX ConfigMap](./configmap.md#proxy-cookie-domain). + +### Proxy cookie path + +Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the "Set-Cookie" header fields of a proxied server response. + +To configure this setting globally for all Ingress rules, the `proxy-cookie-path` value may be set in the [NGINX ConfigMap](./configmap.md#proxy-cookie-path). + +### Proxy buffering + +Enable or disable proxy buffering [`proxy_buffering`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering). +By default proxy buffering is disabled in the NGINX config. + +To configure this setting globally for all Ingress rules, the `proxy-buffering` value may be set in the [NGINX ConfigMap](./configmap.md#proxy-buffering). +To use custom values in an Ingress rule, define this annotation: + +```yaml +nginx.ingress.kubernetes.io/proxy-buffering: "on" +``` + +### Proxy buffers number + +Sets the number of the buffers in [`proxy_buffers`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) used for reading the first part of the response received from the proxied server. +By default the number of proxy buffers is set to 4. + +To configure this setting globally, set `proxy-buffers-number` in the [NGINX ConfigMap](./configmap.md#proxy-buffers-number). To use custom values in an Ingress rule, define this annotation: +```yaml +nginx.ingress.kubernetes.io/proxy-buffers-number: "4" +``` + +### Proxy buffer size + +Sets the size of the buffer [`proxy_buffer_size`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) used for reading the first part of the response received from the proxied server. +By default the proxy buffer size is set to "4k". + +To configure this setting globally, set `proxy-buffer-size` in the [NGINX ConfigMap](./configmap.md#proxy-buffer-size). To use custom values in an Ingress rule, define this annotation: +```yaml +nginx.ingress.kubernetes.io/proxy-buffer-size: "8k" +``` + +### SSL ciphers + +Specifies the [enabled ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers). + +Using this annotation will set the `ssl_ciphers` directive at the server level. This configuration is active for all the paths in the host. + +```yaml +nginx.ingress.kubernetes.io/ssl-ciphers: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP" +``` + +### Connection proxy header + +Using this annotation will override the default connection header set by NGINX. +To use custom values in an Ingress rule, define the annotation: + +```yaml +nginx.ingress.kubernetes.io/connection-proxy-header: "keep-alive" +``` + +### Enable Access Log + +Access logs are enabled by default, but in some scenarios access logs might need to be disabled for a given +ingress. To do this, use the annotation: + +```yaml +nginx.ingress.kubernetes.io/enable-access-log: "false" +``` + +### Enable Rewrite Log + +Rewrite logs are not enabled by default. In some scenarios it could be required to enable NGINX rewrite logs.
+Note that rewrite logs are sent to the error_log file at the notice level. To enable this feature use the annotation: + +```yaml +nginx.ingress.kubernetes.io/enable-rewrite-log: "true" +``` + +### X-Forwarded-Prefix Header +To add the non-standard `X-Forwarded-Prefix` header to the upstream request with a string value, the following annotation can be used: + +```yaml +nginx.ingress.kubernetes.io/x-forwarded-prefix: "/path" +``` + +### Lua Resty WAF + +Using `lua-resty-waf-*` annotations we can enable and control the [lua-resty-waf](https://github.com/p0pr0ck5/lua-resty-waf) +Web Application Firewall per location. + +The following configuration will enable the WAF for the paths defined in the corresponding ingress: + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf: "active" +``` + +In order to run it in debugging mode you can set `nginx.ingress.kubernetes.io/lua-resty-waf-debug` to `"true"` in addition to the above configuration. +The other possible values for `nginx.ingress.kubernetes.io/lua-resty-waf` are `inactive` and `simulate`. +In `inactive` mode the WAF won't do anything, whereas in `simulate` mode it will log a warning message if there's a matching WAF rule for a given request. This is useful to debug a rule and eliminate possible false positives before fully deploying it. + +`lua-resty-waf` comes with a predefined set of rules [https://github.com/p0pr0ck5/lua-resty-waf/tree/84b4f40362500dd0cb98b9e71b5875cb1a40f1ad/rules](https://github.com/p0pr0ck5/lua-resty-waf/tree/84b4f40362500dd0cb98b9e71b5875cb1a40f1ad/rules) that covers the ModSecurity CRS. +You can use `nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets` to ignore a subset of those rulesets. For example: + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets: "41000_sqli, 42000_xss" +``` + +will ignore the two mentioned rulesets. + +It is also possible to configure custom WAF rules per ingress using the `nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules` annotation. For example, the following snippet will configure a WAF rule to deny requests with a query string value that contains the word `foo`: + + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules: '[=[ { "access": [ { "actions": { "disrupt" : "DENY" }, "id": 10001, "msg": "my custom rule", "operator": "STR_CONTAINS", "pattern": "foo", "vars": [ { "parse": [ "values", 1 ], "type": "REQUEST_ARGS" } ] } ], "body_filter": [], "header_filter":[] } ]=]' +``` + +The default allowed content types are `"text/html", "text/json", "application/json"`. +We can enable the following annotation to allow all content types: + + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf-allow-unknown-content-types: "true" +``` + +The default score threshold of lua-resty-waf is 5, which is usually triggered by hitting 2 default rules. You can modify the score threshold with the following annotation: + + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf-score-threshold: "10" +``` + +When HTTPS is enabled on the endpoint, lua-resty-waf can return a 500 error when processing "multipart" content; see this [issue](https://github.com/p0pr0ck5/lua-resty-waf/issues/166). + +Multipart body processing is enabled ("true") by default. + +You may set the following annotation as a workaround: + +```yaml +nginx.ingress.kubernetes.io/lua-resty-waf-process-multipart-body: "false" +``` + +For details on how to write WAF rules, please refer to [https://github.com/p0pr0ck5/lua-resty-waf](https://github.com/p0pr0ck5/lua-resty-waf).
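+
+Putting several of these annotations together, a hypothetical Ingress that runs the WAF in `simulate` mode while tuning rules might carry annotations like the sketch below (the name and the ignored rulesets are placeholders):
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: waf-tuning-example
+  annotations:
+    # log matching rules without blocking requests while tuning
+    nginx.ingress.kubernetes.io/lua-resty-waf: "simulate"
+    # emit additional debugging information
+    nginx.ingress.kubernetes.io/lua-resty-waf-debug: "true"
+    # skip rulesets known to produce false positives for this application
+    nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets: "41000_sqli, 42000_xss"
+```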
+ +[configmap]: ./configmap.md + +### ModSecurity + +[ModSecurity](http://modsecurity.org/) is an open source Web Application Firewall. It can be enabled for a particular set +of ingress locations. The ModSecurity module must first be enabled by enabling ModSecurity in the +[ConfigMap](./configmap.md#enable-modsecurity). Note that this enables ModSecurity for all paths, and it must then be disabled manually on each path where it is not wanted. + +It can be enabled using the following annotation: +```yaml +nginx.ingress.kubernetes.io/enable-modsecurity: "true" +``` +ModSecurity will run in "Detection-Only" mode using the [recommended configuration](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/modsecurity.conf-recommended). + +You can enable the [OWASP Core Rule Set](https://www.modsecurity.org/CRS/Documentation/) by +setting the following annotation: +```yaml +nginx.ingress.kubernetes.io/enable-owasp-core-rules: "true" +``` + +You can pass transactionIDs from nginx by setting up the following: +```yaml +nginx.ingress.kubernetes.io/modsecurity-transaction-id: "$request_id" +``` + +You can also add your own set of modsecurity rules via a snippet: +```yaml +nginx.ingress.kubernetes.io/modsecurity-snippet: | +  SecRuleEngine On +  SecDebugLog /tmp/modsec_debug.log +``` + +Note: If you use both `enable-owasp-core-rules` and `modsecurity-snippet` annotations together, only the +`modsecurity-snippet` will take effect. If you wish to include the [OWASP Core Rule Set](https://www.modsecurity.org/CRS/Documentation/) or +[recommended configuration](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/modsecurity.conf-recommended) simply use the include +statement: +```yaml +nginx.ingress.kubernetes.io/modsecurity-snippet: | +  Include /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf +  Include /etc/nginx/modsecurity/modsecurity.conf +``` + +### InfluxDB + +Using `influxdb-*` annotations we can monitor requests passing through a Location by sending them to an InfluxDB backend exposing the UDP socket +using the [nginx-influxdb-module](https://github.com/influxdata/nginx-influxdb-module/). + +```yaml +nginx.ingress.kubernetes.io/enable-influxdb: "true" +nginx.ingress.kubernetes.io/influxdb-measurement: "nginx-reqs" +nginx.ingress.kubernetes.io/influxdb-port: "8089" +nginx.ingress.kubernetes.io/influxdb-host: "127.0.0.1" +nginx.ingress.kubernetes.io/influxdb-server-name: "nginx-ingress" +``` + +For the `influxdb-host` parameter you have two options: + +- Use an InfluxDB server configured with the [UDP protocol](https://docs.influxdata.com/influxdb/v1.5/supported_protocols/udp/) enabled. +- Deploy Telegraf as a sidecar proxy to the Ingress controller configured to listen on UDP with the [socket listener input](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/socket_listener) and to write using +any one of the [output plugins](https://github.com/influxdata/telegraf/tree/release-1.7/plugins/outputs) like InfluxDB, Apache Kafka, +Prometheus, etc. (recommended) + +It's important to remember that there's no DNS resolver at this stage so you will have to configure +an IP address for `nginx.ingress.kubernetes.io/influxdb-host`. If you deploy Influx or Telegraf as a sidecar (another container in the same pod) this becomes straightforward since you can directly use `127.0.0.1`. + +### Backend Protocol + +Using the `backend-protocol` annotation it is possible to indicate how NGINX should communicate with the backend service.
(Replaces `secure-backends` in older versions.) +Valid Values: HTTP, HTTPS, GRPC, GRPCS and AJP + +By default NGINX uses `HTTP`. + +Example: + +```yaml +nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +``` + +### Use Regex + +!!! attention + When using this annotation with the NGINX annotation `nginx.ingress.kubernetes.io/affinity` of type `cookie`, `nginx.ingress.kubernetes.io/session-cookie-path` must also be set; Session cookie paths do not support regex. + +Using the `nginx.ingress.kubernetes.io/use-regex` annotation will indicate whether or not the paths defined on an Ingress use regular expressions. The default value is `false`. + +The following will indicate that regular expression paths are being used: +```yaml +nginx.ingress.kubernetes.io/use-regex: "true" +``` + +The following will indicate that regular expression paths are __not__ being used: +```yaml +nginx.ingress.kubernetes.io/use-regex: "false" +``` + +When this annotation is set to `true`, the case insensitive regular expression [location modifier](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) will be enforced on ALL paths for a given host regardless of what Ingress they are defined on. + +Additionally, if the [`rewrite-target` annotation](#rewrite) is used on any Ingress for a given host, then the case insensitive regular expression [location modifier](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) will be enforced on ALL paths for a given host regardless of what Ingress they are defined on. + +Please read about [ingress path matching](../ingress-path-matching.md) before using this modifier. + +### Satisfy + +By default, a request would need to satisfy all authentication requirements in order to be allowed. By using this annotation, requests that satisfy either any or all authentication requirements are allowed, based on the configuration value. + +```yaml +nginx.ingress.kubernetes.io/satisfy: "any" +``` diff --git a/docs/user-guide/nginx-configuration/configmap.md b/docs/user-guide/nginx-configuration/configmap.md new file mode 100755 index 0000000000..cb3b6798cf --- /dev/null +++ b/docs/user-guide/nginx-configuration/configmap.md @@ -0,0 +1,951 @@ +# ConfigMaps + +ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable. + +The ConfigMap API resource stores configuration data as key-value pairs. The data provides the configurations for system +components for the nginx-controller. + +In order to overwrite nginx-controller configuration values as seen in [config.go](https://github.com/kubernetes/ingress-nginx/blob/master/internal/ingress/controller/config/config.go), +you can add key-value pairs to the data section of the config-map. For example: + +```yaml +data: + map-hash-bucket-size: "128" + ssl-protocols: SSLv2 +``` + +!!! Important + The keys and values in a ConfigMap can only be strings. + This means that if we want a value with a boolean meaning, we need to quote the value, like "true" or "false". + The same applies to numbers, like "100". + + "Slice" types (defined below as `[]string` or `[]int`) can be provided as a comma-delimited string.
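+
+Putting the quoting rules together, a complete ConfigMap might look like the following sketch (the metadata values are placeholders; use the name and namespace your controller references):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-configuration   # placeholder
+  namespace: ingress-nginx    # placeholder
+data:
+  map-hash-bucket-size: "128"               # number, quoted
+  enable-brotli: "true"                     # boolean, quoted
+  block-cidrs: "10.0.0.0/8,192.168.0.0/16"  # []string as a comma-delimited string
+```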
+ +## Configuration options + +The following table shows a configuration option's name, type, and the default value: + +|name|type|default| +|:---|:---|:------| +|[add-headers](#add-headers)|string|""| +|[allow-backend-server-header](#allow-backend-server-header)|bool|"false"| +|[hide-headers](#hide-headers)|string array|empty| +|[access-log-params](#access-log-params)|string|""| +|[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"| +|[enable-access-log-for-default-backend](#enable-access-log-for-default-backend)|bool|"false"| +|[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"| +|[enable-dynamic-tls-records](#enable-dynamic-tls-records)|bool|"true"| +|[enable-modsecurity](#enable-modsecurity)|bool|"false"| +|[enable-owasp-modsecurity-crs](#enable-owasp-modsecurity-crs)|bool|"false"| +|[client-header-buffer-size](#client-header-buffer-size)|string|"1k"| +|[client-header-timeout](#client-header-timeout)|int|60| +|[client-body-buffer-size](#client-body-buffer-size)|string|"8k"| +|[client-body-timeout](#client-body-timeout)|int|60| +|[disable-access-log](#disable-access-log)|bool|false| +|[disable-ipv6](#disable-ipv6)|bool|false| +|[disable-ipv6-dns](#disable-ipv6-dns)|bool|false| +|[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false| +|[ignore-invalid-headers](#ignore-invalid-headers)|bool|true| +|[retry-non-idempotent](#retry-non-idempotent)|bool|"false"| +|[error-log-level](#error-log-level)|string|"notice"| +|[http2-max-field-size](#http2-max-field-size)|string|"4k"| +|[http2-max-header-size](#http2-max-header-size)|string|"16k"| +|[http2-max-requests](#http2-max-requests)|int|1000| +|[hsts](#hsts)|bool|"true"| +|[hsts-include-subdomains](#hsts-include-subdomains)|bool|"true"| +|[hsts-max-age](#hsts-max-age)|string|"15724800"| +|[hsts-preload](#hsts-preload)|bool|"false"| +|[keep-alive](#keep-alive)|int|75| +|[keep-alive-requests](#keep-alive-requests)|int|100| +|[large-client-header-buffers](#large-client-header-buffers)|string|"4 8k"| +|[log-format-escape-json](#log-format-escape-json)|bool|"false"| +|[log-format-upstream](#log-format-upstream)|string|`%v - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id`| +|[log-format-stream](#log-format-stream)|string|`[$time_local] $protocol $status $bytes_sent $bytes_received $session_time`| +|[enable-multi-accept](#enable-multi-accept)|bool|"true"| +|[max-worker-connections](#max-worker-connections)|int|16384| +|[max-worker-open-files](#max-worker-open-files)|int|0| +|[map-hash-bucket-size](#map-hash-bucket-size)|int|64| +|[nginx-status-ipv4-whitelist](#nginx-status-ipv4-whitelist)|[]string|"127.0.0.1"| +|[nginx-status-ipv6-whitelist](#nginx-status-ipv6-whitelist)|[]string|"::1"| +|[proxy-real-ip-cidr](#proxy-real-ip-cidr)|[]string|"0.0.0.0/0"| +|[proxy-set-headers](#proxy-set-headers)|string|""| +|[server-name-hash-max-size](#server-name-hash-max-size)|int|1024| +|[server-name-hash-bucket-size](#server-name-hash-bucket-size)|int|``| +|[proxy-headers-hash-max-size](#proxy-headers-hash-max-size)|int|512| +|[proxy-headers-hash-bucket-size](#proxy-headers-hash-bucket-size)|int|64| +|[reuse-port](#reuse-port)|bool|"true"| +|[server-tokens](#server-tokens)|bool|"true"|
+|[ssl-ciphers](#ssl-ciphers)|string|"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"| +|[ssl-ecdh-curve](#ssl-ecdh-curve)|string|"auto"| +|[ssl-dh-param](#ssl-dh-param)|string|""| +|[ssl-protocols](#ssl-protocols)|string|"TLSv1.2"| +|[ssl-session-cache](#ssl-session-cache)|bool|"true"| +|[ssl-session-cache-size](#ssl-session-cache-size)|string|"10m"| +|[ssl-session-tickets](#ssl-session-tickets)|bool|"true"| +|[ssl-session-ticket-key](#ssl-session-ticket-key)|string|``| +|[ssl-session-timeout](#ssl-session-timeout)|string|"10m"| +|[ssl-buffer-size](#ssl-buffer-size)|string|"4k"| +|[use-proxy-protocol](#use-proxy-protocol)|bool|"false"| +|[proxy-protocol-header-timeout](#proxy-protocol-header-timeout)|string|"5s"| +|[use-gzip](#use-gzip)|bool|"true"| +|[use-geoip](#use-geoip)|bool|"true"| +|[use-geoip2](#use-geoip2)|bool|"false"| +|[enable-brotli](#enable-brotli)|bool|"false"| +|[brotli-level](#brotli-level)|int|4| +|[brotli-types](#brotli-types)|string|"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"| +|[use-http2](#use-http2)|bool|"true"| +|[gzip-level](#gzip-level)|int|5| +|[gzip-types](#gzip-types)|string|"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"| +|[worker-processes](#worker-processes)|string|``| +|[worker-cpu-affinity](#worker-cpu-affinity)|string|""| +|[worker-shutdown-timeout](#worker-shutdown-timeout)|string|"10s"| +|[load-balance](#load-balance)|string|"round_robin"| +|[variables-hash-bucket-size](#variables-hash-bucket-size)|int|128| +|[variables-hash-max-size](#variables-hash-max-size)|int|2048| +|[upstream-keepalive-connections](#upstream-keepalive-connections)|int|32| +|[upstream-keepalive-timeout](#upstream-keepalive-timeout)|int|60| +|[upstream-keepalive-requests](#upstream-keepalive-requests)|int|100| +|[limit-conn-zone-variable](#limit-conn-zone-variable)|string|"$binary_remote_addr"| +|[proxy-stream-timeout](#proxy-stream-timeout)|string|"600s"| +|[proxy-stream-responses](#proxy-stream-responses)|int|1| +|[bind-address](#bind-address)|[]string|""| +|[use-forwarded-headers](#use-forwarded-headers)|bool|"false"| +|[forwarded-for-header](#forwarded-for-header)|string|"X-Forwarded-For"| +|[compute-full-forwarded-for](#compute-full-forwarded-for)|bool|"false"| +|[proxy-add-original-uri-header](#proxy-add-original-uri-header)|bool|"true"| +|[generate-request-id](#generate-request-id)|bool|"true"| +|[enable-opentracing](#enable-opentracing)|bool|"false"| +|[zipkin-collector-host](#zipkin-collector-host)|string|""| +|[zipkin-collector-port](#zipkin-collector-port)|int|9411| +|[zipkin-service-name](#zipkin-service-name)|string|"nginx"| +|[zipkin-sample-rate](#zipkin-sample-rate)|float|1.0| +|[jaeger-collector-host](#jaeger-collector-host)|string|""|
+|[jaeger-collector-port](#jaeger-collector-port)|int|6831|
+|[jaeger-service-name](#jaeger-service-name)|string|"nginx"|
+|[jaeger-sampler-type](#jaeger-sampler-type)|string|"const"|
+|[jaeger-sampler-param](#jaeger-sampler-param)|string|"1"|
+|[jaeger-sampler-host](#jaeger-sampler-host)|string|"http://127.0.0.1"|
+|[jaeger-sampler-port](#jaeger-sampler-port)|int|5778|
+|[main-snippet](#main-snippet)|string|""|
+|[http-snippet](#http-snippet)|string|""|
+|[server-snippet](#server-snippet)|string|""|
+|[location-snippet](#location-snippet)|string|""|
+|[custom-http-errors](#custom-http-errors)|[]int|[]int{}|
+|[proxy-body-size](#proxy-body-size)|string|"1m"|
+|[proxy-connect-timeout](#proxy-connect-timeout)|int|5|
+|[proxy-read-timeout](#proxy-read-timeout)|int|60|
+|[proxy-send-timeout](#proxy-send-timeout)|int|60|
+|[proxy-buffers-number](#proxy-buffers-number)|int|4|
+|[proxy-buffer-size](#proxy-buffer-size)|string|"4k"|
+|[proxy-cookie-path](#proxy-cookie-path)|string|"off"|
+|[proxy-cookie-domain](#proxy-cookie-domain)|string|"off"|
+|[proxy-next-upstream](#proxy-next-upstream)|string|"error timeout"|
+|[proxy-next-upstream-timeout](#proxy-next-upstream-timeout)|int|0|
+|[proxy-next-upstream-tries](#proxy-next-upstream-tries)|int|3|
+|[proxy-redirect-from](#proxy-redirect-from)|string|"off"|
+|[proxy-request-buffering](#proxy-request-buffering)|string|"on"|
+|[ssl-redirect](#ssl-redirect)|bool|"true"|
+|[whitelist-source-range](#whitelist-source-range)|[]string|[]string{}|
+|[skip-access-log-urls](#skip-access-log-urls)|[]string|[]string{}|
+|[limit-rate](#limit-rate)|int|0|
+|[limit-rate-after](#limit-rate-after)|int|0|
+|[http-redirect-code](#http-redirect-code)|int|308|
+|[proxy-buffering](#proxy-buffering)|string|"off"|
+|[limit-req-status-code](#limit-req-status-code)|int|503|
+|[limit-conn-status-code](#limit-conn-status-code)|int|503|
+|[no-tls-redirect-locations](#no-tls-redirect-locations)|string|"/.well-known/acme-challenge"|
+|[global-auth-url](#global-auth-url)|string|""|
+|[global-auth-method](#global-auth-method)|string|""|
+|[global-auth-signin](#global-auth-signin)|string|""|
+|[global-auth-response-headers](#global-auth-response-headers)|string|""|
+|[global-auth-request-redirect](#global-auth-request-redirect)|string|""|
+|[global-auth-snippet](#global-auth-snippet)|string|""|
+|[no-auth-locations](#no-auth-locations)|string|"/.well-known/acme-challenge"|
+|[block-cidrs](#block-cidrs)|[]string|""|
+|[block-user-agents](#block-user-agents)|[]string|""|
+|[block-referers](#block-referers)|[]string|""|
+
+## add-headers
+
+Sets custom headers from a named ConfigMap before sending traffic to the client. See [proxy-set-headers](#proxy-set-headers). [example](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers)
+
+## allow-backend-server-header
+
+Enables the return of the `Server` header from the backend instead of the generic NGINX string. _**default:**_ is disabled
+
+## hide-headers
+
+Sets additional headers that will not be passed from the upstream server to the client response.
+_**default:**_ empty
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header)
+
+## access-log-params
+
+Additional params for access_log. For example: `buffer=16k`, `gzip`, `flush=1m`.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log](http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log)
+
+## access-log-path
+
+Access log path. Goes to `/var/log/nginx/access.log` by default.
+
+__Note:__ the file `/var/log/nginx/access.log` is a symlink to `/dev/stdout`
+
+## enable-access-log-for-default-backend
+
+Enables access logging for the default backend. _**default:**_ is disabled.
+
+## error-log-path
+
+Error log path. Goes to `/var/log/nginx/error.log` by default.
+
+__Note:__ the file `/var/log/nginx/error.log` is a symlink to `/dev/stderr`
+
+_References:_
+[http://nginx.org/en/docs/ngx_core_module.html#error_log](http://nginx.org/en/docs/ngx_core_module.html#error_log)
+
+## enable-dynamic-tls-records
+
+Enables dynamically sized TLS records to improve time-to-first-byte. _**default:**_ is enabled
+
+_References:_
+[https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency)
+
+## enable-modsecurity
+
+Enables the ModSecurity module for NGINX. _**default:**_ is disabled
+
+## enable-owasp-modsecurity-crs
+
+Enables the OWASP ModSecurity Core Rule Set (CRS). _**default:**_ is disabled
+
+## client-header-buffer-size
+
+Allows configuring a custom buffer size for reading the client request header.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size)
+
+## client-header-timeout
+
+Defines a timeout for reading the client request header, in seconds.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout)
+
+## client-body-buffer-size
+
+Sets the buffer size for reading the client request body.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size)
+
+## client-body-timeout
+
+Defines a timeout for reading the client request body, in seconds.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout)
+
+## disable-access-log
+
+Disables the access log for the entire Ingress controller. _**default:**_ "false"
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log](http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log)
+
+## disable-ipv6
+
+Disables listening on IPv6. _**default:**_ `false`; IPv6 listening is enabled
+
+## disable-ipv6-dns
+
+Disables IPv6 for the NGINX DNS resolver. _**default:**_ `false`; IPv6 resolving enabled.
+
+## enable-underscores-in-headers
+
+Enables underscores in header names. _**default:**_ is disabled
+
+## ignore-invalid-headers
+
+Sets if header fields with invalid names should be ignored.
+_**default:**_ is enabled
+
+## retry-non-idempotent
+
+Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true".
+
+## error-log-level
+
+Configures the logging level of errors. Possible values, in order of increasing severity, are `debug`, `info`, `notice`, `warn`, `error`, `crit`, `alert`, and `emerg`.
+
+_References:_
+[http://nginx.org/en/docs/ngx_core_module.html#error_log](http://nginx.org/en/docs/ngx_core_module.html#error_log)
+
+## http2-max-field-size
+
+Limits the maximum size of an HPACK-compressed request header field.
+
+_References:_
+[https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size)
+
+## http2-max-header-size
+
+Limits the maximum size of the entire request header list after HPACK decompression.
+
+_References:_
+[https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size)
+
+## http2-max-requests
+
+Sets the maximum number of requests (including push requests) that can be served through one HTTP/2 connection, after which the next client request will lead to connection closing and the need of establishing a new connection.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests](http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests)
+
+## hsts
+
+Enables or disables the HSTS header in servers running SSL.
+HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) that tells browsers that the site should only be accessed using HTTPS instead of HTTP. It provides protection against protocol downgrade attacks and cookie theft.
+
+_References:_
+
+- [https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security](https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security)
+- [https://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server](https://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server)
+
+## hsts-include-subdomains
+
+Enables or disables the use of HSTS in all the subdomains of the server-name.
+
+## hsts-max-age
+
+Sets the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.
+
+## hsts-preload
+
+Enables or disables the preload attribute in the HSTS feature (when it is enabled).
+
+## keep-alive
+
+Sets the time during which a keep-alive client connection will stay open on the server side. The zero value disables keep-alive client connections.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout)
+
+## keep-alive-requests
+
+Sets the maximum number of requests that can be served through one keep-alive connection.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests)
+
+## large-client-header-buffers
+
+Sets the maximum number and size of buffers used for reading large client request headers. _**default:**_ 4 8k
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers](http://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers)
+
+## log-format-escape-json
+
+Sets if the escape parameter of the access log uses JSON ("true") or the default character escaping in variables ("false").
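+
+For example, a minimal sketch of the corresponding ConfigMap `data` entry, useful together with the JSON log format shown in the next section:
+
+```
+data:
+  log-format-escape-json: "true"
+```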
+
+## log-format-upstream
+
+Sets the nginx [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format).
+Example for json output:
+
+```console
+log-format-upstream: '{ "time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr",
+  "x-forward-for": "$proxy_add_x_forwarded_for", "request_id": "$req_id", "remote_user":
+  "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status":
+  $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri",
+  "request_query": "$args", "request_length": $request_length, "duration": $request_time,
+  "method": "$request_method", "http_referrer": "$http_referer", "http_user_agent":
+  "$http_user_agent" }'
+```
+
+Please check the [log-format](log-format.md) for the definition of each field.
+
+## log-format-stream
+
+Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format).
+
+## enable-multi-accept
+
+If disabled, a worker process will accept one new connection at a time. Otherwise, a worker process will accept all new connections at once.
+_**default:**_ true
+
+_References:_
+[http://nginx.org/en/docs/ngx_core_module.html#multi_accept](http://nginx.org/en/docs/ngx_core_module.html#multi_accept)
+
+## max-worker-connections
+
+Sets the [maximum number of simultaneous connections](http://nginx.org/en/docs/ngx_core_module.html#worker_connections) that can be opened by each worker process.
+0 will use the value of [max-worker-open-files](#max-worker-open-files).
+_**default:**_ 16384
+
+!!! tip
+    Using 0 in scenarios of high load improves performance at the cost of increasing RAM utilization (even on idle).
+
+## max-worker-open-files
+
+Sets the [maximum number of files](http://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile) that can be opened by each worker process.
+The default of 0 means "max open files (system's limit) / [worker-processes](#worker-processes) - 1024".
+_**default:**_ 0
+
+## map-hash-bucket-size
+
+Sets the bucket size for the [map variables hash tables](http://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size). The details of setting up hash tables are provided in a separate [document](http://nginx.org/en/docs/hash.html).
+
+## proxy-real-ip-cidr
+
+If use-proxy-protocol is enabled, proxy-real-ip-cidr defines the default IP/network address of your external load balancer.
+
+## proxy-set-headers
+
+Sets custom headers from a named ConfigMap before sending traffic to backends. The value format is namespace/name. See [example](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers)
+
+## server-name-hash-max-size
+
+Sets the maximum size of the [server names hash tables](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_max_size) used in server names, map directive’s values, MIME types, names of request header strings, etc.
+
+_References:_
+[http://nginx.org/en/docs/hash.html](http://nginx.org/en/docs/hash.html)
+
+## server-name-hash-bucket-size
+
+Sets the size of the bucket for the server names hash tables.
+
+_References:_
+
+- [http://nginx.org/en/docs/hash.html](http://nginx.org/en/docs/hash.html)
+- [http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size)
+
+## proxy-headers-hash-max-size
+
+Sets the maximum size of the proxy headers hash tables.
+
+_References:_
+
+- [http://nginx.org/en/docs/hash.html](http://nginx.org/en/docs/hash.html)
+- [https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size)
+
+## reuse-port
+
+Instructs NGINX to create an individual listening socket for each worker process (using the SO_REUSEPORT socket option), allowing the kernel to distribute incoming connections between worker processes.
+_**default:**_ true
+
+## proxy-headers-hash-bucket-size
+
+Sets the size of the bucket for the proxy headers hash tables.
+
+_References:_
+
+- [http://nginx.org/en/docs/hash.html](http://nginx.org/en/docs/hash.html)
+- [https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size)
+
+## server-tokens
+
+Sends the NGINX Server header in responses and displays the NGINX version in error pages. _**default:**_ is enabled
+
+## ssl-ciphers
+
+Sets the [ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers) list to enable. The ciphers are specified in the format understood by the OpenSSL library.
+
+The default cipher list is:
+ `ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256`.
+
+The ordering of the cipher list is very important because it decides which algorithms are going to be selected in priority. The recommendation above prioritizes algorithms that provide perfect [forward secrecy](https://wiki.mozilla.org/Security/Server_Side_TLS#Forward_Secrecy).
+
+Please check the [Mozilla SSL Configuration Generator](https://mozilla.github.io/server-side-tls/ssl-config-generator/).
+
+## ssl-ecdh-curve
+
+Specifies a curve for ECDHE ciphers.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve)
+
+## ssl-dh-param
+
+Sets the name of the secret that contains the Diffie-Hellman key to help with "Perfect Forward Secrecy".
+
+_References:_
+
+- [https://wiki.openssl.org/index.php/Diffie-Hellman_parameters](https://wiki.openssl.org/index.php/Diffie-Hellman_parameters)
+- [https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam](https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam)
+- [http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam)
+
+## ssl-protocols
+
+Sets the [SSL protocols](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) to use. The default is: `TLSv1.2`.
+
+Please check the result of the configuration using `https://ssllabs.com/ssltest/analyze.html` or `https://testssl.sh`.
+
+## ssl-session-cache
+
+Enables or disables the use of a shared [SSL cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) among worker processes.
+
+## ssl-session-cache-size
+
+Sets the size of the [SSL shared session cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) between all worker processes.
+
+## ssl-session-tickets
+
+Enables or disables session resumption through [TLS session tickets](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets).
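+
+For example, a minimal sketch of disabling session tickets via the ConfigMap (one common reason is running several replicas without a shared [ssl-session-ticket-key](#ssl-session-ticket-key), described next):
+
+```
+data:
+  ssl-session-tickets: "false"
+```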
+
+## ssl-session-ticket-key
+
+Sets the secret key used to encrypt and decrypt TLS session tickets. The value must be a valid base64 string.
+To create a ticket: `openssl rand 80 | openssl enc -A -base64`
+
+By default, a randomly generated [TLS session ticket key](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets) is used.
+
+## ssl-session-timeout
+
+Sets the time during which a client may [reuse the session](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout) parameters stored in a cache.
+
+## ssl-buffer-size
+
+Sets the size of the [SSL buffer](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) used for sending data. The default of 4k helps NGINX to improve TLS Time To First Byte (TTTFB).
+
+_References:_
+[https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/)
+
+## use-proxy-protocol
+
+Enables or disables the [PROXY protocol](https://www.nginx.com/resources/admin-guide/proxy-protocol/) to receive client connection (real IP address) information passed through proxy servers and load balancers such as HAProxy and Amazon Elastic Load Balancer (ELB).
+
+## proxy-protocol-header-timeout
+
+Sets the timeout value for receiving the proxy-protocol headers. The default of 5 seconds prevents the TLS passthrough handler from waiting indefinitely on a dropped connection.
+_**default:**_ 5s
+
+## use-gzip
+
+Enables or disables compression of HTTP responses using the ["gzip" module](http://nginx.org/en/docs/http/ngx_http_gzip_module.html). MIME types to compress are controlled by [gzip-types](#gzip-types). _**default:**_ true
+
+## use-geoip
+
+Enables or disables the ["geoip" module](http://nginx.org/en/docs/http/ngx_http_geoip_module.html) that creates variables with values depending on the client IP address, using the precompiled MaxMind databases.
+_**default:**_ true
+
+> __Note:__ MaxMind legacy databases are discontinued and will not receive updates after 2019-01-02, cf. [discontinuation notice](https://support.maxmind.com/geolite-legacy-discontinuation-notice/). Consider [use-geoip2](#use-geoip2) below.
+
+## use-geoip2
+
+Enables the [geoip2 module](https://github.com/leev/ngx_http_geoip2_module) for NGINX.
+_**default:**_ false
+
+## enable-brotli
+
+Enables or disables compression of HTTP responses using the ["brotli" module](https://github.com/google/ngx_brotli).
+The default mime type list to compress is: `application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`. _**default:**_ is disabled
+
+> __Note:__ Brotli does not work in Safari < 11. For more information see [https://caniuse.com/#feat=brotli](https://caniuse.com/#feat=brotli)
+
+## brotli-level
+
+Sets the Brotli compression level that will be used. _**default:**_ 4
+
+## brotli-types
+
+Sets the MIME types that will be compressed on-the-fly by brotli.
+_**default:**_ `application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`
+
+## use-http2
+
+Enables or disables [HTTP/2](http://nginx.org/en/docs/http/ngx_http_v2_module.html) support in secure connections.
+
+## gzip-level
+
+Sets the gzip compression level that will be used. _**default:**_ 5
+
+## gzip-types
+
+Sets the MIME types in addition to "text/html" to compress. The special value "\*" matches any MIME type. Responses with the "text/html" type are always compressed if [use-gzip](#use-gzip) is enabled.
+_**default:**_ `application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`.
+
+## worker-processes
+
+Sets the number of [worker processes](http://nginx.org/en/docs/ngx_core_module.html#worker_processes).
+The default of "auto" means the number of available CPU cores.
+
+## worker-cpu-affinity
+
+Binds worker processes to the sets of CPUs. [worker_cpu_affinity](http://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity).
+By default worker processes are not bound to any specific CPUs. The value can be:
+
+- "": an empty string indicates that no affinity is applied.
+- cpumask: e.g. `0001 0010 0100 1000` to bind processes to specific CPUs.
+- auto: binds worker processes automatically to available CPUs.
+
+## worker-shutdown-timeout
+
+Sets a timeout for NGINX to [wait for workers to gracefully shut down](http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout). _**default:**_ "10s"
+
+## load-balance
+
+Sets the algorithm to use for load balancing.
+The value can either be:
+
+- round_robin: to use the default round robin load balancer
+- ewma: to use the Peak EWMA method for routing ([implementation](https://github.com/kubernetes/ingress-nginx/blob/master/rootfs/etc/nginx/lua/balancer/ewma.lua))
+
+The default is `round_robin`.
+
+- To load balance using consistent hashing of IP or other variables, consider the `nginx.ingress.kubernetes.io/upstream-hash-by` annotation.
+- To load balance using session cookies, consider the `nginx.ingress.kubernetes.io/affinity` annotation.
+
+_References:_
+[http://nginx.org/en/docs/http/load_balancing.html](http://nginx.org/en/docs/http/load_balancing.html)
+
+## variables-hash-bucket-size
+
+Sets the bucket size for the variables hash table.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size](http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size)
+
+## variables-hash-max-size
+
+Sets the maximum size of the variables hash table.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size](http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size)
+
+## upstream-keepalive-connections
+
+Activates the cache for connections to upstream servers. The connections parameter sets the maximum number of idle
+keepalive connections to upstream servers that are preserved in the cache of each worker process. When this number is
+exceeded, the least recently used connections are closed.
+_**default:**_ 32
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive)
+
+
+## upstream-keepalive-timeout
+
+Sets a timeout during which an idle keepalive connection to an upstream server will stay open.
+ _**default:**_ 60
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_timeout)
+
+
+## upstream-keepalive-requests
+
+Sets the maximum number of requests that can be served through one keepalive connection. After the maximum number of
+requests is made, the connection is closed.
+_**default:**_ 100
+
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_requests](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_requests)
+
+
+## limit-conn-zone-variable
+
+Sets parameters for a shared memory zone that will keep states for various keys of [limit_conn_zone](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). The default of `$binary_remote_addr` is a variable whose size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses.
+
+## proxy-stream-timeout
+
+Sets the timeout between two successive read or write operations on client or proxied server connections. If no data is transmitted within this time, the connection is closed.
+
+_References:_
+[http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout](http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout)
+
+## proxy-stream-responses
+
+Sets the number of datagrams expected from the proxied server in response to the client request if the UDP protocol is used.
+
+_References:_
+[http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses](http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses)
+
+## bind-address
+
+Sets the addresses on which the server will accept requests instead of `*`. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop.
+
+## use-forwarded-headers
+
+If true, NGINX passes the incoming `X-Forwarded-*` headers to upstreams. Use this option when NGINX is behind another L7 proxy / load balancer that is setting these headers.
+
+If false, NGINX ignores incoming `X-Forwarded-*` headers, filling them with the request information it sees. Use this option if NGINX is exposed directly to the internet, or it's behind a L3/packet-based load balancer that doesn't alter the source IP in the packets.
+
+## forwarded-for-header
+
+Sets the header field for identifying the originating IP address of a client. _**default:**_ X-Forwarded-For
+
+## compute-full-forwarded-for
+
+Appends the remote address to the X-Forwarded-For header instead of replacing it. When this option is enabled, the upstream application is responsible for extracting the client IP based on its own list of trusted proxies.
+
+## proxy-add-original-uri-header
+
+Adds an X-Original-Uri header with the original request URI to the backend request.
+
+## generate-request-id
+
+Ensures that X-Request-ID is defaulted to a random value, if no X-Request-ID is present in the request.
+
+## enable-opentracing
+
+Enables the nginx OpenTracing extension. _**default:**_ is disabled
+
+_References:_
+[https://github.com/opentracing-contrib/nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing)
+
+## zipkin-collector-host
+
+Specifies the host to use when uploading traces. It must be a valid URL.
+
+## zipkin-collector-port
+
+Specifies the port to use when uploading traces. _**default:**_ 9411
+
+## zipkin-service-name
+
+Specifies the service name to use for any traces created. _**default:**_ nginx
+
+## zipkin-sample-rate
+
+Specifies the sample rate for any traces created. _**default:**_ 1.0
+
+## jaeger-collector-host
+
+Specifies the host to use when uploading traces. It must be a valid URL.
+
+## jaeger-collector-port
+
+Specifies the port to use when uploading traces. _**default:**_ 6831
+
+## jaeger-service-name
+
+Specifies the service name to use for any traces created. _**default:**_ nginx
+
+## jaeger-sampler-type
+
+Specifies the sampler to be used when sampling traces. The available samplers are: const, probabilistic, ratelimiting, remote. _**default:**_ const
+
+## jaeger-sampler-param
+
+Specifies the argument to be passed to the sampler constructor. Must be a number.
+For const this should be 0 to never sample and 1 to always sample. _**default:**_ 1
+
+## jaeger-sampler-host
+
+Specifies the custom remote sampler host to be passed to the sampler constructor. Must be a valid URL.
+Leave blank to use the default value (localhost). _**default:**_ http://127.0.0.1
+
+## jaeger-sampler-port
+
+Specifies the custom remote sampler port to be passed to the sampler constructor. Must be a number. _**default:**_ 5778
+
+## main-snippet
+
+Adds custom configuration to the main section of the nginx configuration.
+
+## http-snippet
+
+Adds custom configuration to the http section of the nginx configuration.
+
+## server-snippet
+
+Adds custom configuration to all the servers in the nginx configuration.
+
+## location-snippet
+
+Adds custom configuration to all the locations in the nginx configuration.
+
+You cannot use this to add new locations that proxy to the Kubernetes pods, as the snippet does not have access to the Go template functions. If you want to add custom locations you will have to [provide your own nginx.tmpl](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/custom-template/).
+
+## custom-http-errors
+
+Sets which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page)
+
+Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors), which is required to process error_page.
+
+Example usage: `custom-http-errors: 404,415`
+
+## proxy-body-size
+
+Sets the maximum allowed size of the client request body.
+See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
+
+## proxy-connect-timeout
+
+Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds.
+
+## proxy-read-timeout
+
+Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response.
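+
+As an illustration, a hypothetical ConfigMap `data` snippet raising these timeouts for a slow backend (all values are numbers of seconds, passed as strings; `proxy-send-timeout` is described in the next section):
+
+```
+data:
+  proxy-connect-timeout: "10"
+  proxy-read-timeout: "120"
+  proxy-send-timeout: "120"
+```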
+
+## proxy-send-timeout
+
+Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request.
+
+## proxy-buffers-number
+
+Sets the number of buffers used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) received from the proxied server. This part usually contains a small response header.
+
+## proxy-buffer-size
+
+Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header.
+
+## proxy-cookie-path
+
+Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response.
+
+## proxy-cookie-domain
+
+Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response.
+
+## proxy-next-upstream
+
+Specifies in [which cases](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) a request should be passed to the next server.
+
+## proxy-next-upstream-timeout
+
+[Limits the time](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_timeout) in seconds during which a request can be passed to the next server.
+
+## proxy-next-upstream-tries
+
+Limits the number of [possible tries](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_tries) for passing a request to the next server.
+
+## proxy-redirect-from
+
+Sets the original text that should be changed in the "Location" and "Refresh" header fields of a proxied server response. _**default:**_ off
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect)
+
+## proxy-request-buffering
+
+Enables or disables [buffering of a client request body](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering).
+
+## ssl-redirect
+
+Sets the global value of redirects (308, see [http-redirect-code](#http-redirect-code)) to HTTPS if the server has a TLS certificate (defined in an Ingress rule).
+_**default:**_ "true"
+
+## whitelist-source-range
+
+Sets the default whitelisted IPs for each `server` block. This can be overwritten by an annotation on an Ingress rule.
+See [ngx_http_access_module](http://nginx.org/en/docs/http/ngx_http_access_module.html).
+
+## skip-access-log-urls
+
+Sets a list of URLs that should not appear in the NGINX access log. This is useful with URLs like `/health` or `health-check` that clutter the logs and make them harder to read. _**default:**_ is empty
+
+## limit-rate
+
+Limits the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate](http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate)
+
+## limit-rate-after
+
+Sets the initial amount after which the further transmission of a response to a client will be rate limited.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after](http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after)
+
+## http-redirect-code
+
+Sets the HTTP status code to be used in redirects.
+Supported codes are [301](https://developer.mozilla.org/docs/Web/HTTP/Status/301), [302](https://developer.mozilla.org/docs/Web/HTTP/Status/302), [307](https://developer.mozilla.org/docs/Web/HTTP/Status/307) and [308](https://developer.mozilla.org/docs/Web/HTTP/Status/308)
+_**default:**_ 308
+
+> __Why is the default code 308?__
+
+> [RFC 7238](https://tools.ietf.org/html/rfc7238) was created to define the 308 (Permanent Redirect) status code, which is similar to 301 (Moved Permanently) but keeps the payload in the redirect. This is important if we send a redirect for methods like POST.
+
+## proxy-buffering
+
+Enables or disables [buffering of responses from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering).
+
+## limit-req-status-code
+
+Sets the [status code to return in response to rejected requests](http://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status). _**default:**_ 503
+
+## limit-conn-status-code
+
+Sets the [status code to return in response to rejected connections](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_status). _**default:**_ 503
+
+## no-tls-redirect-locations
+
+A comma-separated list of locations on which HTTP requests will never get redirected to their HTTPS counterpart.
+_**default:**_ "/.well-known/acme-challenge"
+
+## global-auth-url
+
+A URL to an existing service that provides authentication for all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-url`.
+Locations that should not get authenticated can be listed using `no-auth-locations`. See [no-auth-locations](#no-auth-locations). In addition, each service can be excluded from authentication via the annotation `enable-global-auth` set to "false".
+_**default:**_ ""
+
+_References:_ [https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md#external-authentication](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md#external-authentication)
+
+## global-auth-method
+
+An HTTP method to use for an existing service that provides authentication for all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-method`.
+_**default:**_ ""
+
+## global-auth-signin
+
+Sets the location of the error page for an existing service that provides authentication for all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-signin`.
+_**default:**_ ""
+
+## global-auth-response-headers
+
+Sets the headers to pass to the backend once the authentication request completes. Applied to all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-response-headers`.
+_**default:**_ ""
+
+## global-auth-request-redirect
+
+Sets the X-Auth-Request-Redirect header value. Applied to all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-request-redirect`.
+_**default:**_ ""
+
+## global-auth-snippet
+
+Sets a custom snippet to use with external authentication. Applied to all the locations.
+Similar to the Ingress rule annotation `nginx.ingress.kubernetes.io/auth-snippet`.
+_**default:**_ ""
+
+## no-auth-locations
+
+A comma-separated list of locations that should not get authenticated.
+_**default:**_ "/.well-known/acme-challenge"
+
+## block-cidrs
+
+A comma-separated list of IP addresses (or subnets), requests from which have to be blocked globally.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_access_module.html#deny](http://nginx.org/en/docs/http/ngx_http_access_module.html#deny)
+
+## block-user-agents
+
+A comma-separated list of User-Agents, requests from which have to be blocked globally.
+Both full strings and regular expressions can be used here. More details about valid patterns can be found in the `map` Nginx directive documentation.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_map_module.html#map](http://nginx.org/en/docs/http/ngx_http_map_module.html#map)
+
+## block-referers
+
+A comma-separated list of Referers, requests from which have to be blocked globally.
+Both full strings and regular expressions can be used here. More details about valid patterns can be found in the `map` Nginx directive documentation.
+
+_References:_
+[http://nginx.org/en/docs/http/ngx_http_map_module.html#map](http://nginx.org/en/docs/http/ngx_http_map_module.html#map)
diff --git a/docs/user-guide/custom-template.md b/docs/user-guide/nginx-configuration/custom-template.md
similarity index 100%
rename from docs/user-guide/custom-template.md
rename to docs/user-guide/nginx-configuration/custom-template.md
diff --git a/docs/user-guide/nginx-configuration/index.md b/docs/user-guide/nginx-configuration/index.md
new file mode 100644
index 0000000000..bace1a0865
--- /dev/null
+++ b/docs/user-guide/nginx-configuration/index.md
@@ -0,0 +1,7 @@
+# NGINX Configuration
+
+There are three ways to customize NGINX:
+
+1. [ConfigMap](./configmap.md): using a ConfigMap to set global configurations in NGINX.
+2. [Annotations](./annotations.md): use this if you want a specific configuration for a particular Ingress rule.
+3. [Custom template](./custom-template.md): when more specific settings are required, like [open_file_cache](http://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache), adjusting [listen](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen) options such as `rcvbuf`, or when it is not possible to change the configuration through the ConfigMap.
diff --git a/docs/user-guide/nginx-configuration/log-format.md b/docs/user-guide/nginx-configuration/log-format.md
new file mode 100644
index 0000000000..cdd68e597d
--- /dev/null
+++ b/docs/user-guide/nginx-configuration/log-format.md
@@ -0,0 +1,48 @@
+# Log format
+
+The default configuration uses a custom logging format to add additional information about upstreams, response time and status.
+
+```
+log_format upstreaminfo
+    '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
+    '[$the_real_ip] - $remote_user [$time_local] "$request" '
+    '$status $body_bytes_sent "$http_referer" "$http_user_agent" '
+    '$request_length $request_time [$proxy_upstream_name] $upstream_addr '
+    '$upstream_response_length $upstream_response_time $upstream_status $req_id';
+```
+
+| Placeholder | Description |
+|-------------|-------------|
+| `$proxy_protocol_addr` | remote address if proxy protocol is enabled |
+| `$remote_addr` | remote address if proxy protocol is disabled (default) |
+| `$the_real_ip` | the source IP address of the client |
+| `$remote_user` | user name supplied with the Basic authentication |
+| `$time_local` | local time in the Common Log Format |
+| `$request` | full original request line |
+| `$status` | response status |
+| `$body_bytes_sent` | number of bytes sent to a client, not counting the response header |
+| `$http_referer` | value of the Referer header |
+| `$http_user_agent` | value of the User-Agent header |
+| `$request_length` | request length (including request line, header, and request body) |
+| `$request_time` | time elapsed since the first bytes were read from the client |
+| `$proxy_upstream_name` | name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>` |
+| `$upstream_addr` | the IP address and port (or the path to the domain socket) of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas. |
+| `$upstream_response_length` | the length of the response obtained from the upstream server |
+| `$upstream_response_time` | time spent on receiving the response from the upstream server as seconds with millisecond resolution |
+| `$upstream_status` | status code of the response obtained from the upstream server |
+| `$req_id` | the randomly generated ID of the request |
+
+Additional available variables:
+
+| Placeholder | Description |
+|-------------|-------------|
+| `$namespace` | namespace of the ingress |
+| `$ingress_name` | name of the ingress |
+| `$service_name` | name of the service |
+| `$service_port` | port of the service |
+
+
+Sources:
+
+- [Upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
+- [Embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)
\ No newline at end of file
diff --git a/docs/user-guide/nginx-status-page.md b/docs/user-guide/nginx-status-page.md
deleted file mode 100644
index 8152c5eae7..0000000000
--- a/docs/user-guide/nginx-status-page.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# NGINX status page
-
-The [ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) module provides access to basic status information.
-This is the default module active in the url `/nginx_status` in the status port (default is 18080).
-
-This controller provides an alternative to this module using the [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) module.
-To use this module just set in the configuration configmap `enable-vts-status: "true"`.
-
-![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter")
-
-To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
diff --git a/docs/user-guide/opentracing.md b/docs/user-guide/opentracing.md
deleted file mode 100644
index c625062452..0000000000
--- a/docs/user-guide/opentracing.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# OpenTracing
-
-Using the third party module [opentracing-contrib/nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation.
-By default this feature is disabled.
-
-To enable the instrumentation we just need to enable the instrumentation in the configuration configmap and set the host where we should send the traces.
-
-In the [rnburn/zipkin-date-server](https://github.com/rnburn/zipkin-date-server)
-github repository is an example of a dockerized date service. To install the example and zipkin collector run:
-
-```
-kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/zipkin.yaml
-kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/deployment.yaml
-```
-
-Also we need to configure the NGINX controller configmap with the required values:
-
-```
-$ echo '
-apiVersion: v1
-kind: ConfigMap
-data:
-  enable-opentracing: "true"
-  zipkin-collector-host: zipkin.default.svc.cluster.local
-metadata:
-  name: nginx-configuration
-  namespace: ingress-nginx
-  labels:
-    app: ingress-nginx
-' | kubectl replace -f -
-```
-
-Using curl we can generate some traces:
-
-```console
-$ curl -v http://$(minikube ip)
-$ curl -v http://$(minikube ip)
-```
-
-In the zipkin interface we can see the details:
-
-![zipkin screenshot](../images/zipkin-demo.png "zipkin collector screenshot")
diff --git a/docs/user-guide/third-party-addons/modsecurity.md b/docs/user-guide/third-party-addons/modsecurity.md
new file mode 100644
index 0000000000..25265d96af
--- /dev/null
+++ b/docs/user-guide/third-party-addons/modsecurity.md
@@ -0,0 +1,16 @@
+# ModSecurity Web Application Firewall
+
+ModSecurity is an open source, cross-platform web application firewall (WAF) engine for Apache, IIS and Nginx that is developed by Trustwave's SpiderLabs. It has a robust event-based programming language which provides protection from a range of attacks against web applications and allows for HTTP traffic monitoring, logging and real-time analysis - [https://www.modsecurity.org](https://www.modsecurity.org)
+
+The [ModSecurity-nginx](https://github.com/SpiderLabs/ModSecurity-nginx) connector is the connection point between NGINX and libmodsecurity (ModSecurity v3).
+
+The default ModSecurity configuration file is located in `/etc/nginx/modsecurity/modsecurity.conf`. This is the only file located in this directory and contains the default recommended configuration. Using a volume we can replace this file with the desired configuration.
+To enable the ModSecurity feature we need to specify `enable-modsecurity: "true"` in the configuration configmap.
+
+>__Note:__ the default configuration uses detection only, because that minimizes the chances of post-installation disruption.
+The file `/var/log/modsec_audit.log` contains the log of ModSecurity.
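+
+For example, a minimal sketch of the ConfigMap `data` entry that turns the feature on (detection-only, per the default configuration above):
+
+```
+data:
+  enable-modsecurity: "true"
+```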
+
+
+The OWASP ModSecurity Core Rule Set (CRS) is a set of generic attack detection rules for use with ModSecurity or compatible web application firewalls. The CRS aims to protect web applications from a wide range of attacks, including the OWASP Top Ten, with a minimum of false alerts.
+The directory `/etc/nginx/owasp-modsecurity-crs` contains the [owasp-modsecurity-crs repository](https://github.com/SpiderLabs/owasp-modsecurity-crs).
+Using `enable-owasp-modsecurity-crs: "true"` we enable the use of the rules.
diff --git a/docs/user-guide/third-party-addons/opentracing.md b/docs/user-guide/third-party-addons/opentracing.md
new file mode 100644
index 0000000000..18b41b00c1
--- /dev/null
+++ b/docs/user-guide/third-party-addons/opentracing.md
@@ -0,0 +1,205 @@
+# OpenTracing
+
+Enables distributed tracing of requests served by NGINX via the OpenTracing Project.
+
+Using the third party module [opentracing-contrib/nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation.
+By default this feature is disabled.
+
+## Usage
+
+To enable the instrumentation we must enable OpenTracing in the configuration ConfigMap:
+```
+data:
+  enable-opentracing: "true"
+```
+
+We must also set the host to use when uploading traces:
+
+```
+zipkin-collector-host: zipkin.default.svc.cluster.local
+jaeger-collector-host: jaeger-agent.default.svc.cluster.local
+datadog-collector-host: datadog-agent.default.svc.cluster.local
+```
+NOTE: While the option is called `jaeger-collector-host`, you will need to point this to a `jaeger-agent`, and not the `jaeger-collector` component.
+
+Next you will need to deploy a distributed tracing system which uses OpenTracing.
+[Zipkin](https://github.com/openzipkin/zipkin),
+[Jaeger](https://github.com/jaegertracing/jaeger) and
+[Datadog](https://github.com/DataDog/dd-opentracing-cpp)
+have been tested.
+
+Other optional configuration options:
+```
+# specifies the port to use when uploading traces, Default: 9411
+zipkin-collector-port
+
+# specifies the service name to use for any traces created, Default: nginx
+zipkin-service-name
+
+# specifies sample rate for any traces created, Default: 1.0
+zipkin-sample-rate
+
+# specifies the port to use when uploading traces, Default: 6831
+jaeger-collector-port
+
+# specifies the service name to use for any traces created, Default: nginx
+jaeger-service-name
+
+# specifies the sampler to be used when sampling traces.
+# The available samplers are: const, probabilistic, ratelimiting, remote, Default: const
+jaeger-sampler-type
+
+# specifies the argument to be passed to the sampler constructor, Default: 1
+jaeger-sampler-param
+
+# Specifies the custom remote sampler host to be passed to the sampler constructor. Must be a valid URL.
+# Default: http://127.0.0.1
+jaeger-sampler-host
+
+# Specifies the custom remote sampler port to be passed to the sampler constructor. Must be a number. Default: 5778
+jaeger-sampler-port
+
+# specifies the port to use when uploading traces, Default 8126
+datadog-collector-port
+
+# specifies the service name to use for any traces created, Default: nginx
+datadog-service-name
+
+# specifies the operation name to use for any traces collected, Default: nginx.handle
+datadog-operation-name-override
+```
+
+All these options (including host) allow environment variables, such as `$HOSTNAME` or `$HOST_IP`. In the case of Jaeger, if you have a Jaeger agent running on each machine in your cluster, you can use something like `$HOST_IP` (which can be 'mounted' with the `status.hostIP` fieldpath, as described [here](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#capabilities-of-the-downward-api)) to make sure traces will be sent to the local agent.
+
+## Examples
+
+The following examples show how to deploy and test different distributed tracing systems. These examples can be performed
+using Minikube.
+
+### Zipkin
+
+In the [rnburn/zipkin-date-server](https://github.com/rnburn/zipkin-date-server)
+GitHub repository is an example of a dockerized date service. To install the example and Zipkin collector run:
+
+```
+kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/zipkin.yaml
+kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/deployment.yaml
+```
+
+We also need to configure the NGINX controller ConfigMap with the required values:
+
+```
+$ echo '
+apiVersion: v1
+kind: ConfigMap
+data:
+  enable-opentracing: "true"
+  zipkin-collector-host: zipkin.default.svc.cluster.local
+metadata:
+  name: nginx-configuration
+  namespace: kube-system
+' | kubectl replace -f -
+```
+
+In the Zipkin interface we can see the details:
+![zipkin screenshot](../../images/zipkin-demo.png "zipkin collector screenshot")
+
+### Jaeger
+
+1. Enable Ingress addon in Minikube:
+    ```
+    $ minikube addons enable ingress
+    ```
+
+2. Add Minikube IP to /etc/hosts:
+    ```
+    $ echo "$(minikube ip) example.com" | sudo tee -a /etc/hosts
+    ```
+
+3. Apply a basic Service and Ingress Resource:
+    ```
+    # Create Echoheaders Deployment
+    $ kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080
+
+    # Expose as a Cluster-IP
+    $ kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
+
+    # Apply the Ingress Resource
+    $ echo '
+    apiVersion: extensions/v1beta1
+    kind: Ingress
+    metadata:
+      name: echo-ingress
+    spec:
+      rules:
+      - host: example.com
+        http:
+          paths:
+          - backend:
+              serviceName: echoheaders-x
+              servicePort: 80
+            path: /echo
+    ' | kubectl apply -f -
+    ```
+
+4. Enable OpenTracing and set the jaeger-collector-host:
+    ```
+    $ echo '
+    apiVersion: v1
+    kind: ConfigMap
+    data:
+      enable-opentracing: "true"
+      jaeger-collector-host: jaeger-agent.default.svc.cluster.local
+    metadata:
+      name: nginx-configuration
+      namespace: kube-system
+    ' | kubectl replace -f -
+    ```
+
+5. Apply the Jaeger All-In-One Template:
+    ```
+    $ kubectl apply -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml
+    ```
+
+6. Make a few requests to the Service:
+    ```
+    $ curl example.com/echo -d "meow"
+
+    CLIENT VALUES:
+    client_address=172.17.0.5
+    command=POST
+    real path=/echo
+    query=nil
+    request_version=1.1
+    request_uri=http://example.com:8080/echo
+
+    SERVER VALUES:
+    server_version=nginx: 1.10.0 - lua: 10001
+
+    HEADERS RECEIVED:
+    accept=*/*
+    connection=close
+    content-length=4
+    content-type=application/x-www-form-urlencoded
+    host=example.com
+    user-agent=curl/7.54.0
+    x-forwarded-for=192.168.99.1
+    x-forwarded-host=example.com
+    x-forwarded-port=80
+    x-forwarded-proto=http
+    x-original-uri=/echo
+    x-real-ip=192.168.99.1
+    x-scheme=http
+    BODY:
+    meow
+    ```
+
+7.
View the Jaeger UI: + ``` + $ minikube service jaeger-query --url + + http://192.168.99.100:30183 + ``` + + In the Jaeger interface we can see the details: + ![jaeger screenshot](../../images/jaeger-demo.png "jaeger collector screenshot") diff --git a/docs/user-guide/tls.md b/docs/user-guide/tls.md index d02ff988c0..af0b3568cb 100644 --- a/docs/user-guide/tls.md +++ b/docs/user-guide/tls.md @@ -1,154 +1,137 @@ -# TLS +# TLS/HTTPS -- [Default SSL Certificate](#default-ssl-certificate) -- [SSL Passthrough](#ssl-passthrough) -- [HTTPS enforcement](#server-side-https-enforcement) -- [HSTS](#http-strict-transport-security) -- [Server-side HTTPS enforcement through redirect](#server-side-https-enforcement-through-redirect) -- [Kube-Lego](#automated-certificate-management-with-kube-lego) +## TLS Secrets -## Default SSL Certificate - -NGINX provides the option to configure a server as a catch-all with [server name _](http://nginx.org/en/docs/http/server_names.html) for requests that do not match any of the configured server names. This configuration works without issues for HTTP traffic. -In case of HTTPS, NGINX requires a certificate. -For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned scenario. If this flag is not provided NGINX will use a self signed certificate. +Anytime we reference a TLS secret, we mean a PEM-encoded X.509, RSA (2048) secret. -Running without the flag `--default-ssl-certificate`: +You can generate a self-signed certificate and private key with: -```console -$ curl -v https://10.2.78.7:443 -k -* Rebuilt URL to: https://10.2.78.7:443/ -* Trying 10.2.78.4... -* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Thu, 21 Jul 2016 15:38:46 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. 
- -* Connection #0 to host 10.2.78.7 left intact +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}" ``` -Specifying `--default-ssl-certificate=default/foo-tls`: +Then create the secret in the cluster via: -```console -core@localhost ~ $ curl -v https://10.2.78.7:443 -k -* Rebuilt URL to: https://10.2.78.7:443/ -* Trying 10.2.78.7... -* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Mon, 18 Jul 2016 21:02:59 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. - -* Connection #0 to host 10.2.78.7 left intact +```bash +kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE} ``` +The resulting secret will be of type `kubernetes.io/tls`. + +## Default SSL Certificate + +NGINX provides the option to configure a server as a catch-all with +[server_name](http://nginx.org/en/docs/http/server_names.html) +for requests that do not match any of the configured server names. +This configuration works out-of-the-box for HTTP traffic. +For HTTPS, a certificate is naturally required. + +For this reason the Ingress controller provides the flag `--default-ssl-certificate`. +The secret referred to by this flag contains the default certificate to be used when +accessing the catch-all server. +If this flag is not provided, NGINX will use a self-signed certificate. + +For instance, if you have a TLS secret `foo-tls` in the `default` namespace, +add `--default-ssl-certificate=default/foo-tls` in the `nginx-controller` deployment. + ## SSL Passthrough -The flag `--enable-ssl-passthrough` enables SSL passthrough feature. -By default this feature is disabled +The [`--enable-ssl-passthrough`](cli-arguments/) flag enables the SSL Passthrough feature, which is disabled by +default. This is required to enable passthrough backends in Ingress objects. + +!!! warning + This feature is implemented by intercepting **all traffic** on the configured HTTPS port (default: 443) and handing + it over to a local TCP proxy. 
This bypasses NGINX completely and introduces a non-negligible performance penalty. -## Server-side HTTPS enforcement +SSL Passthrough leverages [SNI][SNI] and reads the virtual domain from the TLS negotiation, which requires compatible +clients. After a connection has been accepted by the TLS listener, it is handled by the controller itself and piped back +and forth between the backend and the client. -By default the controller redirects (301) to HTTPS if TLS is enabled for that ingress . If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the configuration ConfigMap. +If there is no hostname matching the requested host name, the request is handed over to NGINX on the configured +passthrough proxy port (default: 442), which proxies the request to the default backend. -To configure this feature for specific ingress resources, you can use the `nginx.ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource. +!!! note + Unlike HTTP backends, traffic to Passthrough backends is sent to the *clusterIP* of the backing Service instead of + individual Endpoints. ## HTTP Strict Transport Security -HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. +HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified +through the use of a special response header. Once a supported browser receives +this header, that browser will prevent any communications from being sent over +HTTP to the specified domain and will instead send all communications over HTTPS. -By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. +HSTS is enabled by default. -To disable this behavior use `hsts: "false"` in the configuration ConfigMap. +To disable this behavior, use `hsts: "false"` in the configuration [ConfigMap][ConfigMap]. -### Server-side HTTPS enforcement through redirect +## Server-side HTTPS enforcement through redirect -By default the controller redirects (301) to `HTTPS` if TLS is enabled for that ingress. If you want to disable that behavior globally, you can use `ssl-redirect: "false"` in the NGINX config map. +By default the controller redirects HTTP clients to the HTTPS port +443 using a 308 Permanent Redirect response if TLS is enabled for that Ingress. -To configure this feature for specific ingress resources, you can use the `nginx.ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource. +This can be disabled globally using `ssl-redirect: "false"` in the NGINX [config map][ConfigMap], +or per-Ingress with the `nginx.ingress.kubernetes.io/ssl-redirect: "false"` +annotation in the particular resource. -When using SSL offloading outside of cluster (e.g. AWS ELB) it may be useful to enforce a redirect to `HTTPS` even when there is not TLS cert available. This can be achieved by using the `nginx.ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource. +!!! tip + + When using SSL offloading outside of the cluster (e.g. AWS ELB) it may be useful to enforce a + redirect to HTTPS even when there is no TLS certificate available. + This can be achieved by using the `nginx.ingress.kubernetes.io/force-ssl-redirect: "true"` + annotation in the particular resource. 
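+As an illustration, a minimal Ingress using this annotation could look like the
+sketch below (`my-app`, `my-app-svc` and the host are hypothetical names used
+only for this example, not defaults shipped with the controller):
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: my-app
+  annotations:
+    # Force the HTTP -> HTTPS redirect even though this Ingress carries no
+    # `tls` section; TLS is assumed to be terminated upstream, e.g. at an AWS ELB.
+    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+spec:
+  rules:
+  - host: my-app.example.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: my-app-svc
+          servicePort: 80
+```
+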
## Automated Certificate Management with Kube-Lego -[Kube-Lego] automatically requests missing or expired certificates from [Let's Encrypt] by monitoring ingress resources and their referenced secrets. To enable this for an ingress resource you have to add an annotation: +!!! tip + Kube-Lego has reached end-of-life and is being + replaced by [cert-manager](https://github.com/jetstack/cert-manager/). + +[Kube-Lego] automatically requests missing or expired certificates from [Let's Encrypt] +by monitoring ingress resources and their referenced secrets. + +To enable this for an ingress resource you have to add an annotation: ```console kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true" ``` -To setup Kube-Lego you can take a look at this [full example]. The first -version to fully support Kube-Lego is nginx Ingress controller 0.8. +To set up Kube-Lego you can take a look at this [full example][full-kube-lego-example]. +The first version to fully support Kube-Lego is NGINX Ingress controller 0.8. + +## Default TLS Version and Ciphers + +To provide the most secure baseline configuration possible, +nginx-ingress defaults to using TLS 1.2 only and a [secure set of TLS ciphers][ssl-ciphers]. + +### Legacy TLS + +The default configuration, though secure, does not support some older browsers and operating systems. + +For instance, TLS 1.1+ is only enabled by default from Android 5.0 on. At the time of writing, +May 2018, [approximately 15% of Android devices](https://developer.android.com/about/dashboards/#Platform) +are not compatible with nginx-ingress's default configuration. + +To change this default behavior, use a [ConfigMap][ConfigMap]. + +A sample ConfigMap fragment to allow these older clients to connect could look something like the following: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-config +data: + ssl-ciphers: "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA" + ssl-protocols: "TLSv1 TLSv1.1 TLSv1.2" +``` + + -[full example]:https://github.com/jetstack/kube-lego/tree/master/examples +[full-kube-lego-example]:https://github.com/jetstack/kube-lego/tree/master/examples [Kube-Lego]:https://github.com/jetstack/kube-lego [Let's Encrypt]:https://letsencrypt.org +[ConfigMap]: ./nginx-configuration/configmap.md +[ssl-ciphers]: ./nginx-configuration/configmap.md#ssl-ciphers +[SNI]: https://en.wikipedia.org/wiki/Server_Name_Indication diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..af20b1f3d0 --- /dev/null +++ b/go.mod @@ -0,0 +1,90 @@ +module k8s.io/ingress-nginx + +go 1.12 + +require ( + cloud.google.com/go v0.37.2 // indirect + contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect + github.com/Azure/go-autorest v11.7.1+incompatible // indirect + github.com/Sirupsen/logrus v1.4.0 // indirect + github.com/armon/go-proxyproto 
v0.0.0-20190211145416-68259f75880e + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/go-units v0.3.3 // indirect + github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect + github.com/eapache/channels v1.1.0 + github.com/elazarl/goproxy v0.0.0-20190410145444-c548f45dcf1d // indirect + github.com/elazarl/goproxy/ext v0.0.0-20190410145444-c548f45dcf1d // indirect + github.com/emicklei/go-restful v2.9.3+incompatible // indirect + github.com/evanphx/json-patch v4.1.0+incompatible // indirect + github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa // indirect + github.com/go-logr/logr v0.1.0 // indirect + github.com/go-logr/zapr v0.1.1 // indirect + github.com/go-openapi/spec v0.19.0 // indirect + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect + github.com/google/gofuzz v1.0.0 // indirect + github.com/google/uuid v1.0.0 + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c // indirect + github.com/imdario/mergo v0.3.7 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/json-iterator/go v1.1.6 + github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 + github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 + github.com/mitchellh/hashstructure v1.0.0 + github.com/mitchellh/mapstructure v1.1.2 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/moul/http2curl v1.0.0 // indirect + github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 // indirect + github.com/ncabatoff/process-exporter v0.0.0-20180915144445-bdf24ef23850 + github.com/ncabatoff/procfs v0.0.0-20180903163354-e1a38cb53622 // indirect + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/runc v0.1.1 + github.com/parnurzeal/gorequest v0.2.15 + github.com/paultag/sniff v0.0.0-20170624152000-87325c3dddf4 + github.com/pborman/uuid v1.2.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/common v0.2.0 + github.com/prometheus/procfs v0.0.0-20190328153300-af7bedc223fb // indirect + github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect + github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.3 + github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 + github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272 + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 // indirect + gopkg.in/fsnotify/fsnotify.v1 v1.4.7 + gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/go-playground/pool.v3 v3.1.1 + + k8s.io/api 
v0.0.0-20190313235455-40a48860b5ab + k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed + k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 + k8s.io/apiserver v0.0.0-20190313205120-8b27c41bdbb1 + k8s.io/cli-runtime v0.0.0-20190314001948-2899ed30580f + k8s.io/client-go v11.0.0+incompatible + k8s.io/cloud-provider v0.0.0-20190323031113-9c9d72d1bf90 // indirect + k8s.io/code-generator v0.0.0-00010101000000-000000000000 + k8s.io/component-base v0.0.0-20190313120452-4727f38490bc + k8s.io/klog v0.3.0 + k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580 // indirect + k8s.io/kubernetes v1.14.1 + k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect + sigs.k8s.io/controller-runtime v0.1.10 + sigs.k8s.io/kustomize v2.0.3+incompatible // indirect + sigs.k8s.io/testing_frameworks v0.1.1 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect +) + +replace ( + github.com/Sirupsen/logrus => github.com/sirupsen/logrus v1.4.1 + k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190511023357-639c964206c2 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..7bd30442f2 --- /dev/null +++ b/go.sum @@ -0,0 +1,422 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0= +cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= +github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.12.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g= +github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k= +github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20190410145444-c548f45dcf1d h1:FEw1BeUVT/wxetVmacXPqQgRyYCG+0aCfQel+53Pa/E= +github.com/elazarl/goproxy v0.0.0-20190410145444-c548f45dcf1d/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy/ext v0.0.0-20190410145444-c548f45dcf1d h1:G71TTAuHFAHgY2X7zfUWDbogxiZuLvpJ9xevbOw4Vcw= +github.com/elazarl/goproxy/ext v0.0.0-20190410145444-c548f45dcf1d/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/emicklei/go-restful v2.9.3+incompatible h1:2OwhVdhtzYUp5P5wuGsVDPagKSRd9JK72sJCHVCXh5g= +github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 
+github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= +github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= +github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0 h1:yJW3HCkTHg7NOA+gZ83IPHzUSnUzGXhGmsdiCcMexbA= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/spec v0.19.0 h1:A4SZ6IWh3lnjH0rG0Z5lkxazMGBECtrZcbyYQi+64k4= +github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= 
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c h1:vGQ5eWkG5WkBdfGR+7J5yF2a6clwcUMM1r9fmRHPBVI= +github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= 
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 
h1:kw1v0NlnN+GZcU8Ma8CLF2Zzgjfx95gs3/GN3vYAPpo= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= +github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 h1:t4WWQ9I797y7QUgeEjeXnVb+oYuEDQc6gLvrZJTYo94= +github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833/go.mod h1:0CznHmXSjMEqs5Tezj/w2emQoM41wzYM9KpDKUHPYag= +github.com/ncabatoff/process-exporter v0.0.0-20180915144445-bdf24ef23850 h1:+N9D9mM5Nr4iDYOrZBVDT7w7wGhFNTvEXWX80LNXJMo= +github.com/ncabatoff/process-exporter v0.0.0-20180915144445-bdf24ef23850/go.mod h1:HlundBOuN0AHjy7yL0DD250oa8sy/Zcu1GpxhkSaiVY= +github.com/ncabatoff/procfs v0.0.0-20180903163354-e1a38cb53622 h1:xoXp2liUdBAas/zxqJcB+U/t0Supau5NOLw9XbuLD5I= +github.com/ncabatoff/procfs v0.0.0-20180903163354-e1a38cb53622/go.mod h1:1Sa4zSat0csY8rfgyuUg1HTed0q3v9nf8ij8Ontoi5Y= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/parnurzeal/gorequest v0.2.15 
h1:oPjDCsF5IkD4gUk6vIgsxYNaSgvAnIh1EJeROn3HdJU= +github.com/parnurzeal/gorequest v0.2.15/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= +github.com/paultag/sniff v0.0.0-20170624152000-87325c3dddf4 h1:bOK8V55gl1AEWE9KiXSZ0fzARvVBehdmYZOTFcQ5kUo= +github.com/paultag/sniff v0.0.0-20170624152000-87325c3dddf4/go.mod h1:J3XXNGJINXLa4yIivdUT0Ad/srv2q0pSOWbbm6El2EY= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190328153300-af7bedc223fb h1:LvNCMEj0FFZQYsxZb7o3xQPrtqOOB6lrTUOWshC+ZTs= +github.com/prometheus/procfs v0.0.0-20190328153300-af7bedc223fb/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod 
h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272 h1:scDk3LAM8x+NPuywVGC0q0/G+5Ed8M0+YXecz4XnWRM= +github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272/go.mod h1:KNkcm66cr4ilOiEcjydK+tc2ShPUhqmuoXCljXUBPu8= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 
h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313 h1:pczuHS43Cp2ktBEEmLwScxgjWsBSzdaQiKzUyf3DTTc= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/pool.v3 v3.1.1 h1:4Qcj91IsYTpIeRhe/eo6Fz+w6uKWPEghx8vHFTYMfhw= +gopkg.in/go-playground/pool.v3 v3.1.1/go.mod h1:pUAGBximS/hccTTSzEop6wvvQhVa3QPDFFW+8REdutg= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs= +k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= +k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20190313205120-8b27c41bdbb1 h1:8JYeBJdfXeNkq2RsG0hmvA4miGaN/3Y7bTt+vs2MnOE= +k8s.io/apiserver v0.0.0-20190313205120-8b27c41bdbb1/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/cli-runtime v0.0.0-20190314001948-2899ed30580f h1:gRAqn9Z3rp62UwLU3PdC7Lhmsvd3e9PXLsq7EG+bq1s= +k8s.io/cli-runtime v0.0.0-20190314001948-2899ed30580f/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM= +k8s.io/client-go v11.0.0+incompatible 
h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= +k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/cloud-provider v0.0.0-20190323031113-9c9d72d1bf90 h1:FI7cwUSbHsDhcpEI8f9YYZinBEEJio7TSfI7BBIQ89g= +k8s.io/cloud-provider v0.0.0-20190323031113-9c9d72d1bf90/go.mod h1:LlIffnLBu+GG7d4ppPzC8UnA1Ex8S+ntmSRVsnr7Xy4= +k8s.io/code-generator v0.0.0-20190511023357-639c964206c2 h1:wfF2JZb8Bl68FNMg/BAkIkkE29Z/bXWBYTtoQh/Cbo0= +k8s.io/code-generator v0.0.0-20190511023357-639c964206c2/go.mod h1:YMQ7Lt97nW/I6nHACDccgS/sPAyrHQNans96RwPaSb8= +k8s.io/component-base v0.0.0-20190313120452-4727f38490bc h1:wECJj/THUnRfyHajZrU4SmU2EIrSAHb3S9d2jTItVmo= +k8s.io/component-base v0.0.0-20190313120452-4727f38490bc/go.mod h1:DMaomcf3j3MM2j1FsvlLVVlc7wA2jPytEur3cP9zRxQ= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580 h1:fq0ZXW/BAIFZH+dazlups6JTVdwzRo5d9riFA103yuQ= +k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kubernetes v1.14.1 h1:I9F52h5sqVxBmoSsBlNQ0YygNcukDilkpGxUbJRoBoY= +k8s.io/kubernetes v1.14.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/controller-runtime v0.1.10 h1:amLOmcekVdnsD1uIpmgRqfTbQWJ2qxvQkcdeFhcotn4= +sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index 7b7f0c868e..b8f5fe282c 100755 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -50,6 +50,7 @@ verbose_out = sys.stderr if args.verbose else open("/dev/null", "w") + def get_refs(): refs = {} @@ -63,6 +64,7 @@ def get_refs(): return refs + def file_passes(filename, refs, regexs): try: f = open(filename, 'r') @@ -117,7 +119,8 @@ def file_passes(filename, refs, regexs): # if we don't match the reference at this point, fail if ref != data: - print("Header in %s does not match reference, diff:" % filename, file=verbose_out) + print("Header in %s does not match reference, diff:" % + filename, file=verbose_out) 
if args.verbose: print(file=verbose_out) for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''): @@ -127,11 +130,18 @@ def file_passes(filename, refs, regexs): return True + def file_extension(filename): return os.path.splitext(filename)[1].split(".")[-1].lower() -skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', - "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test"] + +skipped_dirs = [ + '.git', + "vendor", + "test/e2e/framework/framework.go", + "images" +] + def normalize_files(files): newfiles = [] @@ -144,6 +154,7 @@ def normalize_files(files): newfiles[i] = os.path.join(args.rootdir, pathname) return newfiles + def get_files(extensions): files = [] if len(args.filenames) > 0: @@ -171,19 +182,23 @@ def get_files(extensions): outfiles.append(pathname) return outfiles + def get_regexs(): regexs = {} # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing - regexs["year"] = re.compile( 'YEAR' ) + regexs["year"] = re.compile('YEAR') # dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything years = range(2014, date.today().year + 1) - regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) ) + regexs["date"] = re.compile( + '(%s)' % "|".join(map(lambda l: str(l), years))) # strip // +build \n\n build constraints - regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) + regexs["go_build_constraints"] = re.compile( + r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) return regexs + def main(): regexs = get_regexs() refs = get_refs() @@ -195,5 +210,6 @@ def main(): return 0 + if __name__ == "__main__": - sys.exit(main()) + sys.exit(main()) diff --git a/hack/kube-env.sh b/hack/kube-env.sh new file mode 100644 index 0000000000..4415df155f --- /dev/null +++ b/hack/kube-env.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Some useful colors. +if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_norm="${color_start}0m" +fi + +# Returns the server version as MMmmpp, with MM as the major +# component, mm the minor component, and pp as the patch +# revision. e.g. 0.7.1 is echoed as 701, and 1.0.11 would be +# 10011. (This makes for easy integer comparison in bash.) 
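Given the MMmmpp packing described in the comment above, callers can gate on a minimum server version with a plain integer test. A minimal usage sketch (hypothetical: it assumes hack/kube-env.sh has been sourced, KUBECTL points at a working kubectl, and the v1.8.0 threshold is an arbitrary example; the function itself is defined just below):

```
# v1.8.0 packs to 10800, so this tests for "v1.8.0 or newer".
if [[ "$(kube_server_version)" -ge 10800 ]]; then
  echo "API server is v1.8.0 or newer"
fi
```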
+function kube_server_version() { + local server_version + local major + local minor + local patch + + # This sed expression is the POSIX BRE to match strings like: + # Server Version: &version.Info{Major:"0", Minor:"7+", GitVersion:"v0.7.0-dirty", GitCommit:"ad44234f7152e9c66bc2853575445c7071335e57", GitTreeState:"dirty"} + # and capture the GitVersion portion (which has the patch level) + server_version=$(${KUBECTL} --match-server-version=false version | grep "Server Version:") + read major minor patch < <( + echo ${server_version} | \ + sed "s/.*GitVersion:\"v\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\).*/\1 \2 \3/") + printf "%02d%02d%02d" ${major} ${minor} ${patch} | sed 's/^0*//' +} diff --git a/hack/tools.go b/hack/tools.go new file mode 100644 index 0000000000..ed32168055 --- /dev/null +++ b/hack/tools.go @@ -0,0 +1,25 @@ +// +build tools + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains code generation utilities +// This package imports things required by build scripts, to force `go mod` to see them as dependencies +package tools + +import ( + _ "k8s.io/code-generator" +) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 559362ac56..f30124a2df 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -19,7 +19,7 @@ set -o nounset set -o pipefail SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of @@ -29,6 +29,9 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # k8s.io/ingress-nginx/pkg/client k8s.io/ingress-nginx/pkg/apis \ # nginxingress:v1alpha1 \ # --output-base "$(dirname ${BASH_SOURCE})/../../.." +mkdir -p ${CODEGEN_PKG}/hack +cp ${SCRIPT_ROOT}/hack/boilerplate/boilerplate.go.txt ${CODEGEN_PKG}/hack/boilerplate.go.txt +chmod +x ${CODEGEN_PKG}/*.sh ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ k8s.io/ingress-nginx/internal k8s.io/ingress-nginx/internal \ diff --git a/hack/verify-all.sh b/hack/verify-all.sh new file mode 100755 index 0000000000..764d7c3c15 --- /dev/null +++ b/hack/verify-all.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/hack/kube-env.sh" + +SILENT=true + +function is-excluded { + for e in $EXCLUDE; do + if [[ $1 -ef ${BASH_SOURCE} ]]; then + return + fi + if [[ $1 -ef "$KUBE_ROOT/hack/$e" ]]; then + return + fi + done + return 1 +} + +while getopts ":v" opt; do + case $opt in + v) + SILENT=false + ;; + \?) + echo "Invalid flag: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +if $SILENT ; then + echo "Running in silent mode; run with -v to see script logs." +fi + +EXCLUDE="verify-all.sh verify-codegen.sh" + +ret=0 +for t in `ls $KUBE_ROOT/hack/verify-*.sh` +do + if is-excluded $t ; then + echo "Skipping $t" + continue + fi + if $SILENT ; then + echo -e "Verifying $t" + if bash "$t" &> /dev/null; then + echo -e "${color_green}SUCCESS${color_norm}" + else + echo -e "${color_red}FAILED${color_norm}" + ret=1 + fi + else + bash "$t" || ret=1 + fi +done + +exit $ret + +# ex: ts=2 sw=2 et filetype=sh diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh index f5835c4253..13f830f1e9 100755 --- a/hack/verify-codegen.sh +++ b/hack/verify-codegen.sh @@ -21,8 +21,8 @@ set -o pipefail SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. SCRIPT_BASE=${SCRIPT_ROOT}/../.. -DIFFROOT="${SCRIPT_ROOT}/pkg" -TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" +DIFFROOT="${SCRIPT_ROOT}/internal" +TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/internal" _tmp="${SCRIPT_ROOT}/_tmp" cleanup() { diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh new file mode 100755 index 0000000000..db31c0469f --- /dev/null +++ b/hack/verify-gofmt.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# GoFmt apparently is changing @ head... + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +cd "${KUBE_ROOT}" + +find_files() { + find . -not \( \ + \( \ + -wholename './.git' \ + -o -wholename '*/vendor/*' \ + -o -wholename '*bindata.go' \ + \) -prune \ + \) -name '*.go' +} + +GOFMT="gofmt -s" +bad_files=$(find_files | xargs $GOFMT -l) +if [[ -n "${bad_files}" ]]; then + echo "!!! '$GOFMT' needs to be run on the following files: " + echo "${bad_files}" + exit 1 +fi diff --git a/hack/verify-golint.sh b/hack/verify-golint.sh new file mode 100755 index 0000000000..7c92848809 --- /dev/null +++ b/hack/verify-golint.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +cd "${KUBE_ROOT}" + +GOLINT=${GOLINT:-"golint"} +PACKAGES=($(go list ./internal/... | grep -v /vendor/)) +bad_files=() +for package in "${PACKAGES[@]}"; do + out=$("${GOLINT}" -min_confidence=0.9 "${package}" | grep -v -E '(should not use dot imports|internal/file/bindata.go)' || :) + if [[ -n "${out}" ]]; then + bad_files+=("${out}") + fi +done +if [[ "${#bad_files[@]}" -ne 0 ]]; then + echo "!!! '$GOLINT' problems: " + echo "${bad_files[@]}" + exit 1 +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/images/404-server/.gitignore b/images/404-server/.gitignore deleted file mode 100644 index 254defddb5..0000000000 --- a/images/404-server/.gitignore +++ /dev/null @@ -1 +0,0 @@ -server diff --git a/images/404-server/Dockerfile b/images/404-server/Dockerfile deleted file mode 100644 index 1875f0a2e8..0000000000 --- a/images/404-server/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2015 The Kubernetes Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM scratch - -# nobody:nobody -USER 65534:65534 -COPY server / -ENTRYPOINT ["/server"] diff --git a/images/404-server/Godeps/Godeps.json b/images/404-server/Godeps/Godeps.json deleted file mode 100644 index b985aed2bc..0000000000 --- a/images/404-server/Godeps/Godeps.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "ImportPath": "k8s.io/ingress/images/404-server", - "GoVersion": "go1.8", - "GodepVersion": "v79", - "Packages": [ - "./..." 
- ], - "Deps": [ - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" - }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "v0.8.0-83-ge7e9030", - "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", - "Comment": "v0.8.0-83-ge7e9030", - "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" - }, - { - "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "65c1f6f8f0fc1e2185eb9863a3bc751496404259" - }, - { - "ImportPath": "github.com/prometheus/procfs/xfs", - "Rev": "65c1f6f8f0fc1e2185eb9863a3bc751496404259" - } - ] -} diff --git a/images/404-server/Godeps/Readme b/images/404-server/Godeps/Readme deleted file mode 100644 index 4cdaa53d56..0000000000 --- a/images/404-server/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/images/404-server/Makefile b/images/404-server/Makefile deleted file mode 100644 index e46815c410..0000000000 --- a/images/404-server/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2016 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Build the default backend binary or image for amd64, arm, arm64 and ppc64le -# -# Usage: -# [PREFIX=gcr.io/google_containers/defaultbackend] [ARCH=amd64] [TAG=1.4] make (server|container|push) - - -all: push - -TAG=1.4 -PREFIX?=gcr.io/google_containers/defaultbackend -ARCH?=amd64 - -server: server.go - CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) GOARM=6 go build -a -installsuffix cgo -ldflags '-w -s' -o server - -container: server - docker build --pull -t $(PREFIX)-$(ARCH):$(TAG) . - -push: container - gcloud docker -- push $(PREFIX)-$(ARCH):$(TAG) - -push-legacy: container -ifeq ($(ARCH),amd64) - # Backward compatibility. 
TODO: deprecate this image tag - docker tag -f $(PREFIX)-$(ARCH):$(TAG) $(PREFIX):$(TAG) - gcloud docker -- push $(PREFIX):$(TAG) -endif - -clean: - rm -f server diff --git a/images/404-server/OWNERS b/images/404-server/OWNERS deleted file mode 100644 index 89b9586b45..0000000000 --- a/images/404-server/OWNERS +++ /dev/null @@ -1,12 +0,0 @@ -approvers: -- bprashanth -- luxas -- mikedanese -- aledbf -- nicksardo -reviewers: -- bprashanth -- luxas -- mikedanese -- aledbf -- nicksardo diff --git a/images/404-server/README.md b/images/404-server/README.md index e281a138a3..7aaf96bfc4 100644 --- a/images/404-server/README.md +++ b/images/404-server/README.md @@ -1,33 +1,2 @@ -# 404-server (default backend) - -404-server is a simple webserver that satisfies the ingress, which means it has to do two things: - - 1. Serves a 404 page at `/` - 2. Serves 200 on a `/healthz` - -## How to release: - -The `404-server` Makefile supports multiple architectures, which means it may cross-compile and build an docker image easily. -If you are releasing a new version, please bump the `TAG` value in the `Makefile` before building the images. - -How to build and push all images: -``` -# Build for linux/amd64 (default) -$ make push -$ make push ARCH=amd64 -# ---> gcr.io/google_containers/defaultbackend-amd64:TAG - -$ make push-legacy ARCH=amd64 -# ---> gcr.io/google_containers/defaultbackend:TAG (image with backwards compatible naming) - -$ make push ARCH=arm -# ---> gcr.io/google_containers/defaultbackend-arm:TAG - -$ make push ARCH=arm64 -# ---> gcr.io/google_containers/defaultbackend-arm64:TAG - -$ make push ARCH=ppc64le -# ---> gcr.io/google_containers/defaultbackend-ppc64le:TAG -``` - -Of course, if you don't want to push the images, just run `make container` +The 404-server (defaultbackend) has moved to the +[kubernetes/ingress-gce](https://github.com/kubernetes/ingress-gce) repository. diff --git a/images/404-server/server.go b/images/404-server/server.go deleted file mode 100644 index 05509b5ee2..0000000000 --- a/images/404-server/server.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// A webserver that only serves a 404 page. Used as a default backend. -package main - -import ( - "context" - "flag" - "fmt" - "net/http" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -var port = flag.Int("port", 8080, "Port number to serve default backend 404 page.") - -func init() { - // Register the summary and the histogram with Prometheus's default registry. 
- prometheus.MustRegister(requestCount) - prometheus.MustRegister(requestDuration) -} - -func main() { - flag.Parse() - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - start := time.Now() - w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "default backend - 404") - - duration := time.Now().Sub(start).Seconds() * 1e3 - - proto := strconv.Itoa(r.ProtoMajor) - proto = proto + "." + strconv.Itoa(r.ProtoMinor) - - requestCount.WithLabelValues(proto).Inc() - requestDuration.WithLabelValues(proto).Observe(duration) - }) - http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, "ok") - }) - http.Handle("/metrics", promhttp.Handler()) - - srv := &http.Server{ - Addr: fmt.Sprintf(":%d", *port), - } - - go func() { - err := srv.ListenAndServe() - if err != http.ErrServerClosed { - fmt.Fprintf(os.Stderr, "could not start http server: %s\n", err) - os.Exit(1) - } - }() - - stop := make(chan os.Signal, 1) - signal.Notify(stop, syscall.SIGTERM) - <-stop - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - err := srv.Shutdown(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "could not graceful shutdown http server: %s\n", err) - } -} diff --git a/images/404-server/vendor/github.com/beorn7/perks/LICENSE b/images/404-server/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be66..0000000000 --- a/images/404-server/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7c..0000000000 --- a/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@
[2,388 lines of integer test data removed with the vendored quantile package]
diff --git a/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go b/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5ba..0000000000 --- a/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e.
the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(float64(l) * q) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
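A minimal sketch of how the quantile API documented above is driven: declare the target quantiles and their absolute error bounds a priori, insert observations, then query. The import path matches the vendored package; the 0.5/0.99 targets, their error bounds, and the input values are illustrative only:

```
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Supplying the quantiles up front lets the stream compress away
	// samples that cannot affect the requested ranks.
	s := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 1; i <= 1000; i++ {
		s.Insert(float64(i))
	}
	// Query is only well-defined for quantiles passed to NewTargeted.
	fmt.Println(s.Query(0.5), s.Query(0.99))
}
```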
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/LICENSE b/images/404-server/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921ef..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile b/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a93..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go b/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575b35..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). 
-// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go b/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index aa207298f9..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,970 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. 
It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
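The varint wire format documented above is easiest to see on a concrete value. A hedged round-trip sketch using the package-level helpers of the classic github.com/golang/protobuf/proto API (EncodeVarint is the encoder counterpart from the same package; it is not shown in this hunk):

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100. The low 7 bits go first with the continuation
	// bit set, so the wire bytes are 0xAC 0x02.
	buf := proto.EncodeVarint(300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}
```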
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
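The zigzag decoders above reverse the sint32/sint64 mapping that interleaves signed values as 0, -1, 1, -2, 2 → 0, 1, 2, 3, 4, so small negative numbers stay short on the wire. A self-contained sketch of both directions using the same bit tricks (the helper names zigzag64/unzigzag64 are mine, not the package's):

```
package main

import "fmt"

// zigzag64 is the sint64 encoding that DecodeZigzag64 undoes.
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag64 mirrors the (x >> 1) ^ sign-extension expression above.
func unzigzag64(x uint64) int64 { return int64(x>>1) ^ (int64(x&1)<<63)>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag64(v)) // 0->0 -1->1 1->2 -2->3 2->4
	}
	fmt.Println(unzigzag64(5)) // -3
}
```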
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
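// Illustrative sketch (not part of the deleted vendored file), using a
// hypothetical generated message type pb.Item with a repeated field, to
// show the reset semantics documented on Unmarshal vs. UnmarshalMerge:
//
//	var item pb.Item
//	_ = proto.Unmarshal(data1, &item)      // item now reflects only data1
//	_ = proto.UnmarshalMerge(data2, &item) // data2 merged in: repeated
//	                                       // fields append, scalars overwrite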
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. 
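// Illustrative sketch (not part of the deleted vendored file): the field
// key read at the top of unmarshalType packs tag and wire type into one
// varint, key = tag<<3 | wire. For the single key byte 0x08:
//
//	u    = 0x08
//	wire = u & 0x7 = 0 (WireVarint)
//	tag  = u >> 3  = 1
//
// so 0x08 introduces field 1, encoded as a varint.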
-const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
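// Illustrative sketch (not part of the deleted vendored file): the packed
// layout consumed by these decoders is one key, a byte count, then the
// concatenated values. A packed repeated int32 field with tag 4 holding
// [3, 270] arrives as:
//
//	0x22       key: 4<<3 | WireBytes(2)
//	0x03       payload length in bytes
//	0x03       varint 3
//	0x8E 0x02  varint 270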
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
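// Illustrative sketch (not part of the deleted vendored file): for a
// map<string, int32> entry "a" -> 1 the synthetic entry message is
//
//	0x0A 0x01 0x61   key field:   1<<3 | WireBytes(2), length 1, "a"
//	0x10 0x01        value field: 2<<3 | WireVarint(0), varint 1
//
// which is why one byte is enough to dispatch on here.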
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go b/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 68b9b30cfa..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1355 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. 
-// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. 
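// Illustrative sketch (not part of the deleted vendored file): the
// length-delimited writers above compose directly; EncodeRawBytes emits
// the byte count as a varint followed by the payload:
//
//	buf := proto.NewBuffer(nil)
//	_ = buf.EncodeRawBytes([]byte("hi"))
//	// buf now holds 0x02 0x68 0x69 (length 2, 'h', 'i')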
-type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
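// Illustrative note (not part of the deleted vendored file): a packed bool
// field is the key appended above, then l as the byte length (each bool
// encodes as exactly one varint byte), then the l one-byte values written
// by the loop below.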
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
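// Illustrative note (not part of the deleted vendored file): repeated
// strings are length-delimited and cannot be packed, so the key is
// re-emitted on every iteration, one key/length/payload triple per element.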
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { - return err - } - v, _ := exts.extensionsRead() - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. 
- if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. 
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. 
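// Illustrative note (not part of the deleted vendored file): with a 5-byte
// message, lMsg = 5 needs a one-byte varint while four bytes were
// reserved, so x = 1 - 4 = -3 and the copy above slid the message three
// bytes left; the lines below truncate to iLen, write the varint length
// 0x05, and re-extend the buffer over the message.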
- o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go b/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf5966..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. 
- If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. 
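// Illustrative note (not part of the deleted vendored file): a generated
// oneof field is an interface holding a pointer to a single-field wrapper
// struct, so equality means both nil, or the same dynamic wrapper type
// with equal wrapped values, which is what the checks below implement.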
- n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go b/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 6b9b363746..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,586 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
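
For orientation while reading the deleted equal.go above: a minimal usage sketch of proto.Equal, reusing the hypothetical generated package pb and its Test message from the lib.go package documentation further down (the relative import path is illustrative, as in those docs).

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package, as in the lib.go docs
    )

    func main() {
        a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

        b.Reps = []int64{1, 2} // repeated fields must match in length and elements
        fmt.Println(proto.Equal(a, b)) // false
    }
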
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. 
-func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
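
As context for the descriptor-driven accessors below: a sketch of the ExtensionDesc the protocol compiler would emit for an extension field, and its registration. The types pb.MyMessage and pb.Ext, the field number, and the tag string are illustrative assumptions, not taken from this repository.

    // Hypothetical generated code for: extend MyMessage { optional Ext more = 103; }
    var E_Ext_More = &proto.ExtensionDesc{
        ExtendedType:  (*pb.MyMessage)(nil), // nil pointer to the extended type
        ExtensionType: (*pb.Ext)(nil),       // nil pointer to the extension type
        Field:         103,                  // must fall in a declared extension range
        Name:          "example.Ext.more",
        Tag:           "bytes,103,opt,name=more",
    }

    func init() {
        proto.RegisterExtension(E_Ext_More) // panics on a duplicate field number
    }
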
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. 
- x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. 
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. 
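
Putting the accessors above together: a hedged round-trip sketch using the hypothetical pb.MyMessage and E_Ext_More descriptor from the earlier note; the Data field and GetData getter on pb.Ext are likewise assumed.

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package
    )

    func main() {
        msg := &pb.MyMessage{}
        ext := &pb.Ext{Data: proto.String("abc")}
        if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
            log.Fatal(err) // e.g. wrong value type, or field not in a declared range
        }
        fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // true

        v, err := proto.GetExtension(msg, pb.E_Ext_More) // decodes if still encoded
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v.(*pb.Ext).GetData()) // "abc"

        proto.ClearExtension(msg, pb.E_Ext_More)
        fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // false
    }
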
-func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go b/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index ac4ddbc075..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,898 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. 
- - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: 
&pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. 
-func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
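
A short sketch tying the pieces above together: the pointer helpers for optional fields, Buffer reuse across marshals (Buffer.Marshal lives in the deleted encode.go), and UnmarshalJSONEnum. It reuses the hypothetical pb.Test and FOO enum from the package documentation above.

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package, as in the docs above
    )

    func main() {
        // Helper routines allocate the pointers that optional scalars require.
        msg := &pb.Test{
            Label: proto.String("hello"),
            Type:  proto.Int32(17),
        }

        // A Buffer can be reset and reused to amortize allocations.
        buf := proto.NewBuffer(nil)
        for i := 0; i < 3; i++ {
            buf.Reset()
            if err := buf.Marshal(msg); err != nil {
                log.Fatal("marshaling error: ", err)
            }
            fmt.Println(len(buf.Bytes()))
        }

        // Both JSON representations of an enum are accepted.
        if v, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO"); err == nil {
            fmt.Println(v) // 17
        }
    }
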
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
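
The observable behavior of the default-value machinery above is small; a minimal sketch, again assuming the generated pb.Test from the lib.go documentation, whose type field declares [default=77]:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package
    )

    func main() {
        msg := &pb.Test{Label: proto.String("x")} // Type deliberately left unset
        proto.SetDefaults(msg)
        fmt.Println(*msg.Type)       // 77: unset field with a declared default is filled in
        fmt.Println(msg.Reps == nil) // true: no declared default, so left untouched
    }
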
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go b/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982decd6..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
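
The tag arithmetic used in UnmarshalMessageSet above, EncodeVarint(uint64(id)<<3|WireBytes), is easy to verify in isolation with the package's exported varint helpers:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // A message-set item is framed as tag||length||payload; the tag packs
        // the field number and wire type into a single varint.
        id := uint64(12345)
        tag := proto.EncodeVarint(id<<3 | proto.WireBytes)

        x, n := proto.DecodeVarint(tag)
        fmt.Println(x>>3, x&7, n) // 12345 2 3 (field number, wire type, tag bytes)
    }
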
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2e16..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
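The reflect-based accessors in this file all follow one pattern: walk from a *struct to a field with FieldByIndex, then surface a typed pointer via Addr().Interface(). A minimal sketch of that pattern, assuming a toy `msg` type:

```go
package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  string
	Count int32
}

// fieldPtr returns the address of the field at index path `index` in the
// struct pointed to by p, as an interface{} holding a typed pointer.
// This mirrors structPointer_ifield in the deleted file.
func fieldPtr(p interface{}, index []int) interface{} {
	v := reflect.ValueOf(p).Elem() // *msg -> addressable msg
	return v.FieldByIndex(index).Addr().Interface()
}

func main() {
	m := &msg{}
	// Equivalent of structPointer_StringVal: a *string to the Name field.
	*fieldPtr(m, []int{0}).(*string) = "hello"
	// Equivalent of structPointer_Word32Val for an int32 field.
	*fieldPtr(m, []int{1}).(*int32) = 7
	fmt.Println(m.Name, m.Count) // hello 7
}
```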
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d47c..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
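The unsafe variant identifies a field purely by its byte offset from the start of the struct and recovers a typed pointer with pointer arithmetic. A sketch of the same idea follows; note the deleted code predates unsafe.Add (Go 1.17) and spells the arithmetic as `uintptr(p) + uintptr(f)` instead.

```go
package main

import (
	"fmt"
	"unsafe"
)

type msg struct {
	Name  string
	Count int32
}

func main() {
	m := &msg{}
	base := unsafe.Pointer(m)
	off := unsafe.Offsetof(m.Count) // the "field" is just this offset

	// Equivalent of structPointer_Word32Val: a *int32 at base+offset.
	p := (*int32)(unsafe.Add(base, off))
	*p = 42
	fmt.Println(m.Count) // 42
}
```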
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go b/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index ec2289c005..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
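The tagMap shown above is a small optimization: protocol buffer tags are usually small and dense, so a slice index replaces a map lookup, and a map is kept only as the fallback for large tags. The same fast/slow split in a standalone sketch with hypothetical names:

```go
package main

import "fmt"

const fastLimit = 1024 // same idea as tagMapFastLimit

type hybrid struct {
	fast []int       // index = key, value = payload (-1 means absent)
	slow map[int]int // fallback for keys >= fastLimit
}

func (h *hybrid) put(k, v int) {
	if k > 0 && k < fastLimit {
		for len(h.fast) <= k {
			h.fast = append(h.fast, -1)
		}
		h.fast[k] = v
		return
	}
	if h.slow == nil {
		h.slow = make(map[int]int)
	}
	h.slow[k] = v
}

func (h *hybrid) get(k int) (int, bool) {
	if k > 0 && k < fastLimit {
		if k >= len(h.fast) {
			return 0, false
		}
		v := h.fast[k]
		return v, v >= 0
	}
	v, ok := h.slow[k]
	return v, ok
}

func main() {
	var h hybrid
	h.put(3, 0)    // tag 3 -> field index 0: slice path
	h.put(5000, 1) // large tag falls back to the map
	fmt.Println(h.get(3))
	fmt.Println(h.get(5000))
}
```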
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
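Parse above consumes tags of the form "bytes,49,opt,name=foo,def=hello!". For context, here is how such a tag reaches it in practice: reflection exposes the raw `protobuf` struct tag, which is then comma-split. A small sketch, with a made-up `example` type:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type example struct {
	Name string `protobuf:"bytes,1,opt,name=name,json=myName"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")
	fmt.Println(tag) // bytes,1,opt,name=name,json=myName

	// Same first steps as Parse: wire type, tag number, then options.
	fields := strings.Split(tag, ",")
	fmt.Println("wire:", fields[0], "tag:", fields[1])
	for _, opt := range fields[2:] {
		if strings.HasPrefix(opt, "name=") {
			fmt.Println("orig name:", opt[5:])
		}
	}
}
```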
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
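The tagcode precalculation at the end of setEncAndDec above encodes the field key (tag<<3 | wiretype) as a varint once, so the encoders can later copy raw bytes instead of recomputing it. A worked example of that arithmetic, extracted into a standalone helper:

```go
package main

import "fmt"

// tagcode reproduces the varint loop from setEncAndDec: encode
// (tag<<3 | wire) seven bits at a time, low bits first, with the
// continuation bit set on all but the last byte.
func tagcode(tag, wire uint32) []byte {
	x := tag<<3 | wire
	var buf [8]byte
	i := 0
	for ; x > 127; i++ {
		buf[i] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[i] = uint8(x)
	return buf[:i+1]
}

func main() {
	// Tag 49 with wire type 2 (bytes): 49<<3|2 = 394 -> 0x8a 0x03.
	fmt.Printf("% x\n", tagcode(49, 2))
	// Small keys fit in one byte: tag 1, varint -> 0x08.
	fmt.Printf("% x\n", tagcode(1, 0))
}
```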
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
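GetProperties above pairs an RLock fast path with a full Lock on miss, and getPropertiesLocked publishes the cache entry before doing any work so that self-referential message types terminate. The same caching discipline reduced to a skeleton, with a hypothetical `info` payload standing in for StructProperties:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

type info struct{ name string }

var (
	mu    sync.RWMutex
	cache = map[reflect.Type]*info{}
)

func get(t reflect.Type) *info {
	mu.RLock()
	in, ok := cache[t]
	mu.RUnlock()
	if ok {
		return in // common case: already computed
	}
	mu.Lock()
	defer mu.Unlock()
	return getLocked(t)
}

func getLocked(t reflect.Type) *info {
	if in, ok := cache[t]; ok {
		return in // lost the race; another goroutine filled it in
	}
	in := &info{}
	cache[t] = in // publish before filling in: breaks recursive cycles
	in.name = t.String()
	return in
}

func main() {
	fmt.Println(get(reflect.TypeOf(struct{ X int }{})).name)
}
```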
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/text.go b/images/404-server/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876bf03..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
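The type registry deleted above (RegisterType, MessageName, MessageType) is a pair of mirrored maps: fully-qualified proto name to reflect.Type and back. A minimal sketch of that shape, with a made-up `Ping` message:

```go
package main

import (
	"fmt"
	"reflect"
)

var (
	byName = map[string]reflect.Type{}
	byType = map[reflect.Type]string{}
)

// register mirrors RegisterType: store the pointer-to-struct type under
// its proto name, and the name under the type.
func register(x interface{}, name string) {
	t := reflect.TypeOf(x)
	byName[name] = t
	byType[t] = name
}

type Ping struct{}

func main() {
	register((*Ping)(nil), "example.Ping")
	fmt.Println(byName["example.Ping"])          // *main.Ping
	fmt.Println(byType[reflect.TypeOf(&Ping{})]) // example.Ping
}
```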
- -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. 
-// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
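The map rendering earlier in this hunk sorts reflect.MapKeys, which come back in random order, so the text output is deterministic. The deleted code uses a dedicated mapKeys sorter; sort.Slice stands in for it in this simplified sketch:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[int32]string{3: "c", 1: "a", 2: "b"}
	v := reflect.ValueOf(m)

	keys := v.MapKeys() // random order on every run
	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })

	// Stable key/value rendering, in the spirit of writeStruct above.
	for _, k := range keys {
		fmt.Printf("key: %d value: %q\n", k.Int(), v.MapIndex(k).Interface())
	}
}
```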
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. 
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go b/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 61f83c1e10..0000000000 --- a/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,895 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
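The marshaler half of the package ends above and the parser half begins below. For orientation, a minimal round-trip sketch of the public API being removed here, assuming a hypothetical generated message type `pb.Example` with a `name` field (everything under `example.com/` is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/demo/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Example{Name: proto.String("demo")}

	text := proto.MarshalTextString(msg) // multi-line form: name: "demo"
	fmt.Println(text)
	fmt.Println(proto.CompactTextString(msg)) // same content on one line

	// A configured marshaler can expand google.protobuf.Any fields of
	// linked-in message types instead of printing raw type_url/value pairs.
	tm := proto.TextMarshaler{ExpandAny: true}
	fmt.Println(tm.Text(msg))

	// UnmarshalText (defined in text_parser.go below) parses it back.
	out := &pb.Example{}
	if err := proto.UnmarshalText(text, out); err != nil {
		log.Fatal(err)
	}
}
```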
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
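Since the tokenizer above is unexported, a sketch of its behavior has to live inside the package, e.g. in a `_test.go` file; the input string here is made up for illustration:

```go
package proto

import (
	"fmt"
	"testing"
)

func TestTokenizerSketch(t *testing.T) {
	p := newTextParser(`name: "x"  # comments run to end of line
count: 3`)
	for {
		p.advance()
		if p.cur.err != nil {
			t.Fatalf("tokenize: %v", p.cur.err)
		}
		if p.done {
			break // input exhausted
		}
		fmt.Printf("line %d, offset %d: %q\n", p.cur.line, p.cur.offset, p.cur.value)
	}
	// Yields the tokens `name`, `:`, `"x"`, `count`, `:`, `3`; the
	// whitespace and the comment are consumed by skipWhitespace.
}
```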
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
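To make the accepted syntax concrete, a hedged sketch of input this parser handles: `<`/`{` are interchangeable delimiters for map entries, the colon before a message-typed field is optional, and trailing `;`/`,` separators are optional. `pb.Config`, carrying a `map<string, string> attrs` field, is a placeholder type:

```go
// Each textual entry below becomes one map insertion via SetMapIndex.
src := `attrs: <key: "a" value: "1">; attrs {key: "b" value: "2"},`
cfg := &pb.Config{}
if err := proto.UnmarshalText(src, cfg); err != nil {
	log.Fatal(err)
}
// cfg.Attrs now holds {"a": "1", "b": "2"}; note the second entry omits
// the colon and uses braces, and both separators are tolerated.
```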
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 13f15dfce0..0000000000 --- a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2013 Matt T. Proud - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 66d9b5458f..0000000000 --- a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - headerBuf := make([]byte, binary.MaxVarintLen32) - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. 
- if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385cbe..0000000000 --- a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 4b76ea9a1d..0000000000 --- a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. 
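Taken together with ReadDelimited above, a round-trip sketch of the delimited-stream helpers, assuming a placeholder generated type `pb.Event`:

```go
// Write a stream of length-prefixed messages, then read them back.
var buf bytes.Buffer
for _, ev := range events { // events []*pb.Event, assumed to exist
	if _, err := pbutil.WriteDelimited(&buf, ev); err != nil {
		log.Fatal(err)
	}
}
for {
	out := &pb.Event{}
	if _, err := pbutil.ReadDelimited(&buf, out); err == io.EOF {
		break // clean end of stream
	} else if err != nil {
		log.Fatal(err)
	}
	// use out ...
}
```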
This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - buf := make([]byte, binary.MaxVarintLen32) - encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE b/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a30ee..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346d..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bff06..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index 623d3d83fe..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. - Collect(chan<- Metric) -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. 
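A hedged sketch of a custom Collector against the interface documented above; `MustNewConstMetric` and `GaugeValue` are assumed from elsewhere in the client_golang package, and `queueDepth` is a stand-in for application code:

```go
type queueCollector struct {
	depth *prometheus.Desc
}

func (q *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- q.depth // the one descriptor this collector ever emits
}

func (q *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// Create the metric on the fly at scrape time, reusing the descriptor.
	ch <- prometheus.MustNewConstMetric(q.depth, prometheus.GaugeValue, queueDepth())
}
```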
-func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index 72d5256a50..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - value -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - c.value.Add(v) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. At least one label name must be -// provided. 
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. 
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 1835b16f65..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. 
- id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - vh := hashNew() - for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) - } - d.id = vh - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) - for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) - } - d.dimHash = lh - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. 
a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 278969dc7c..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. 
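// A minimal sketch complementing the example above, assuming only APIs shown
// in this diff (NewHistogramVec, HistogramOpts, DefBuckets, MustRegister,
// promhttp.Handler); the metric name, the "method" label, and the observed
// value are illustrative, not taken from the vendored code.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var requestDurations = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "HTTP request latencies in seconds.",
		Buckets: prometheus.DefBuckets, // .005s through 10s; tune per use case.
	},
	[]string{"method"},
)

func main() {
	prometheus.MustRegister(requestDurations)
	// Each distinct label value selects one child Histogram of the vector.
	requestDurations.WithLabelValues("GET").Observe(0.042)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}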
-// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. -// -// Above, you have already encountered the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. Your own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts.
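// A hedged sketch of the "mirroring" Collector described above: it creates a
// throw-away const Metric at collect time. Desc, NewDesc, MustNewConstMetric,
// and GaugeValue are the package APIs shown in this diff; the queueLength
// function and the metric name are hypothetical stand-ins.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type queueCollector struct {
	desc *prometheus.Desc
}

// Describe sends the one Desc this Collector will ever use.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect mirrors the externally maintained number into a metric on the fly.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

// queueLength stands in for instrumentation that already exists elsewhere.
func queueLength() float64 { return 42 }

func main() {
	qc := &queueCollector{
		desc: prometheus.NewDesc(
			"work_queue_length", // fqName (illustrative)
			"Current length of the work queue.",
			nil, nil, // no variable labels, no const labels
		),
	}
	prometheus.MustRegister(qc)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}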
-// -// Advanced Uses of the Registry -// -// While MustRegister is by far the most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegistry variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegistry. You can use multiple registries at the -// same time to expose different metrics in different ways. You can use separate -// registries for testing purposes. -// -// Also note that the DefaultRegistry comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) -// -// Pushing to the Pushgateway -// -// Functions for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index 18a99d5faa..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototyping, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index e3b67df8ac..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,29 +0,0 @@ -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index 9ab5a3d621..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts.
See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, 0) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. 
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index f96764559b..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,276 +0,0 @@ -package prometheus - -import ( - "fmt" - "runtime" - "runtime/debug" - "time" -) - -type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - - // metrics to describe and collect - metrics memStatsMetrics -} - -// NewGoCollector returns a collector which exports metrics about the current -// go process. -func NewGoCollector() Collector { - return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the GC invocation durations.", - nil, nil), - metrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, 
{ - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) -} - -// Describe returns all descriptions of the collector. 
-func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - for _, i := range c.metrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) - - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index f46eff6acf..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync/atomic" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). 
-const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. 
In that case, those Histograms must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) - - h.init(h) // Init self-collection. - return h -} - -type histogram struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - - selfCollector - // Note that there is no mutex required. - - desc *Desc - - upperBounds []float64 - counts []uint64 - - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets.
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) - } - atomic.AddUint64(&h.count, 1) - for { - oldBits := atomic.LoadUint64(&h.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { - break - } - } -} - -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } - } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Observer and not a -// Metric so that no type conversion to an Observer is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Observer and not a Metric so that no -// type conversion to an Observer is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Observer { - return m.MetricVec.WithLabelValues(lvs...).(Observer) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. 
By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Observer { - return m.MetricVec.With(labels).(Observer) -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index d485ce0b8e..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "bytes" - "compress/gzip" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is not instrumented, but can be instrumented with the tooling provided -// in package promhttp). -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.Handler instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - }) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// - It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. 
"Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." 
- resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current go routine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() - - return out -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: 
- return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index d4063d98f4..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - - dto "github.com/prometheus/client_model/go" -) - -const separatorByte byte = 255 - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. 
-	Write(*dto.Metric) error
-	// TODO(beorn7): The original rationale of passing in a pre-allocated
-	// dto.Metric protobuf to save allocations has disappeared. The
-	// signature of this method should be changed to "Write() (*dto.Metric,
-	// error)".
-}
-
-// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just
-// an alias of this type (which might change when the requirement arises.)
-//
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
-type Opts struct {
-	// Namespace, Subsystem, and Name are components of the fully-qualified
-	// name of the Metric (created by joining these components with
-	// "_"). Only Name is mandatory, the others merely help structuring the
-	// name. Note that the fully-qualified name of the metric must be a
-	// valid Prometheus metric name.
-	Namespace string
-	Subsystem string
-	Name      string
-
-	// Help provides information about this metric. Mandatory!
-	//
-	// Metrics with the same fully-qualified name must have the same Help
-	// string.
-	Help string
-
-	// ConstLabels are used to attach fixed labels to this metric. Metrics
-	// with the same fully-qualified name must have the same label names in
-	// their ConstLabels.
-	//
-	// Note that in most cases, labels have a value that varies during the
-	// lifetime of a process. Those labels are usually managed with a metric
-	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
-	// serve only special purposes. One is for the special case where the
-	// value of a label does not change during the lifetime of a process,
-	// e.g. if the revision of the running binary is put into a
-	// label. Another, more advanced purpose is if more than one Collector
-	// needs to collect Metrics with the same fully-qualified name. In that
-	// case, those Metrics must differ in the values of their
-	// ConstLabels. See the Collector examples.
-	//
-	// If the value of a label never changes (not even between binaries),
-	// that label most likely should not be a label at all (but part of the
-	// metric name).
-	ConstLabels Labels
-}
-
-// BuildFQName joins the given three name components by "_". Empty name
-// components are ignored. If the name parameter itself is empty, an empty
-// string is returned, no matter what. Metric implementations included in this
-// library use this function internally to generate the fully-qualified metric
-// name from the name component in their Opts. Users of the library will only
-// need this function if they implement their own Metric or instantiate a Desc
-// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string {
-	if name == "" {
-		return ""
-	}
-	switch {
-	case namespace != "" && subsystem != "":
-		return strings.Join([]string{namespace, subsystem, name}, "_")
-	case namespace != "":
-		return strings.Join([]string{namespace, name}, "_")
-	case subsystem != "":
-		return strings.Join([]string{subsystem, name}, "_")
-	}
-	return name
-}
-
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
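// Illustrative sketch (not from the vendored sources) of the joining rule
// BuildFQName implements above; the component values are made up.
func exampleFQNames() []string {
	return []string{
		prometheus.BuildFQName("ingress", "controller", "requests"), // "ingress_controller_requests"
		prometheus.BuildFQName("", "controller", "requests"),        // "controller_requests"
		prometheus.BuildFQName("ingress", "", "requests"),           // "ingress_requests"
		prometheus.BuildFQName("ingress", "controller", ""),         // "": an empty name always wins
	}
}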
-type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 94b2553e14..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "github.com/prometheus/procfs" - -type processCollector struct { - pid int - collectFn func(chan<- Metric) - pidFn func() (int, error) - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, rss *Desc - startTime *Desc -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) Collector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} - -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. 
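// Illustrative sketch (not from the vendored sources): registering the
// process collector documented above for the current process, under a
// made-up "ingress" namespace prefix.
func registerProcessMetrics() {
	// assumes: import "os" and the prometheus package
	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "ingress"))
}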
-func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) Collector { - ns := "" - if len(namespace) > 0 { - ns = namespace + "_" - } - - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, - - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } - - return &c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } - - if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - } -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go deleted file mode 100644 index 24b31503e9..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
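// Illustrative sketch (not from the vendored sources) of the Describe/Collect
// contract the process collector above follows: Describe sends every possible
// *Desc exactly once, Collect sends one Metric per Desc on each scrape. All
// names are made up.
type upCollector struct{ up *prometheus.Desc }

func (c *upCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.up }

func (c *upCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.up, prometheus.GaugeValue, 1)
}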
- -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - if cn && fl && hj && rf { - return &fancyDelegator{d} - } - - return d -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 100644 index b7743fb871..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -// newDelegator handles the four different methods of upgrading a -// http.ResponseWriter to delegator. -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, ps := w.(http.Pusher) - _, rf := w.(io.ReaderFrom) - - // Check for the four most common combination of interfaces a - // http.ResponseWriter might implement. - switch { - case cn && fl && hj && rf && ps: - // All interfaces. - return &fancyPushDelegator{ - fancyDelegator: &fancyDelegator{d}, - p: &pushDelegator{d}, - } - case cn && fl && hj && rf: - // All interfaces, except http.Pusher. - return &fancyDelegator{d} - case ps: - // Just http.Pusher. - return &pushDelegator{d} - } - - return d -} - -type fancyPushDelegator struct { - p *pushDelegator - - *fancyDelegator -} - -func (f *fancyPushDelegator) Push(target string, opts *http.PushOptions) error { - return f.p.Push(target, opts) -} - -type pushDelegator struct { - *responseWriterDelegator -} - -func (f *pushDelegator) Push(target string, opts *http.PushOptions) error { - return f.ResponseWriter.(http.Pusher).Push(target, opts) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 4c70a7af63..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promhttp provides tooling around HTTP servers and clients.
-//
-// First, the package allows the creation of http.Handler instances to expose
-// Prometheus metrics via HTTP. promhttp.Handler acts on the
-// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
-// custom registry or anything that implements the Gatherer interface. It also
-// allows the creation of handlers that act differently on errors or allow
-// errors to be logged.
-//
-// Second, the package provides tooling to instrument instances of http.Handler
-// via middleware. Middleware wrappers follow the naming scheme
-// InstrumentHandlerX, where X describes the intended use of the middleware.
-// See each function's doc comment for specific details.
-//
-// Finally, the package allows for an http.RoundTripper to be instrumented via
-// middleware. Middleware wrappers follow the naming scheme
-// InstrumentRoundTripperX, where X describes the intended use of the
-// middleware. See each function's doc comment for specific details.
-package promhttp
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-	"sync"
-
-	"github.com/prometheus/common/expfmt"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-const (
-	contentTypeHeader     = "Content-Type"
-	contentLengthHeader   = "Content-Length"
-	contentEncodingHeader = "Content-Encoding"
-	acceptEncodingHeader  = "Accept-Encoding"
-)
-
-var bufPool sync.Pool
-
-func getBuf() *bytes.Buffer {
-	buf := bufPool.Get()
-	if buf == nil {
-		return &bytes.Buffer{}
-	}
-	return buf.(*bytes.Buffer)
-}
-
-func giveBuf(buf *bytes.Buffer) {
-	buf.Reset()
-	bufPool.Put(buf)
-}
-
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
-//
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
-func Handler() http.Handler {
-	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
-}
-
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
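// Illustrative sketch (not from the vendored sources) of the HandlerFor use
// case documented above: serving a hand-picked registry with explicit error
// handling. Registry contents and the listen address are made up.
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError, // best-effort serving
	}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}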
-func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		mfs, err := reg.Gather()
-		if err != nil {
-			if opts.ErrorLog != nil {
-				opts.ErrorLog.Println("error gathering metrics:", err)
-			}
-			switch opts.ErrorHandling {
-			case PanicOnError:
-				panic(err)
-			case ContinueOnError:
-				if len(mfs) == 0 {
-					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
-					return
-				}
-			case HTTPErrorOnError:
-				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
-				return
-			}
-		}
-
-		contentType := expfmt.Negotiate(req.Header)
-		buf := getBuf()
-		defer giveBuf(buf)
-		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
-		enc := expfmt.NewEncoder(writer, contentType)
-		var lastErr error
-		for _, mf := range mfs {
-			if err := enc.Encode(mf); err != nil {
-				lastErr = err
-				if opts.ErrorLog != nil {
-					opts.ErrorLog.Println("error encoding metric family:", err)
-				}
-				switch opts.ErrorHandling {
-				case PanicOnError:
-					panic(err)
-				case ContinueOnError:
-					// Handled later.
-				case HTTPErrorOnError:
-					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
-					return
-				}
-			}
-		}
-		if closer, ok := writer.(io.Closer); ok {
-			closer.Close()
-		}
-		if lastErr != nil && buf.Len() == 0 {
-			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
-			return
-		}
-		header := w.Header()
-		header.Set(contentTypeHeader, string(contentType))
-		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
-		if encoding != "" {
-			header.Set(contentEncodingHeader, encoding)
-		}
-		w.Write(buf.Bytes())
-		// TODO(beorn7): Consider streaming serving of metrics.
-	})
-}
-
-// HandlerErrorHandling defines how a Handler serving metrics will handle
-// errors.
-type HandlerErrorHandling int
-
-// These constants cause handlers serving metrics to behave as described if
-// errors are encountered.
-const (
-	// Serve an HTTP status code 500 upon the first error
-	// encountered. Report the error message in the body.
-	HTTPErrorOnError HandlerErrorHandling = iota
-	// Ignore errors and try to serve as many metrics as possible. However,
-	// if no metrics can be served, serve an HTTP status code 500 and the
-	// last error message in the body. Only use this in deliberate "best
-	// effort" metrics collection scenarios. It is recommended to at least
-	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-	// errors completely.
-	ContinueOnError
-	// Panic upon the first error encountered (useful for "crash only" apps).
-	PanicOnError
-)
-
-// Logger is the minimal interface HandlerOpts needs for logging. Note that
-// log.Logger from the standard library implements this interface, and it is
-// easy to implement by custom loggers, if they don't do so already anyway.
-type Logger interface {
-	Println(v ...interface{})
-}
-
-// HandlerOpts specifies options for how to serve metrics via an http.Handler.
-// The zero value of HandlerOpts is a reasonable default.
-type HandlerOpts struct {
-	// ErrorLog specifies an optional logger for errors collecting and
-	// serving metrics. If nil, errors are not logged at all.
-	ErrorLog Logger
-	// ErrorHandling defines how errors are handled. Note that errors are
-	// logged regardless of the configured ErrorHandling provided ErrorLog
-	// is not nil.
-	ErrorHandling HandlerErrorHandling
-	// If DisableCompression is true, the handler will never compress the
-	// response, even if requested by the client.
-	DisableCompression bool
-}
-
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
-	if compressionDisabled {
-		return writer, ""
-	}
-	header := request.Header.Get(acceptEncodingHeader)
-	parts := strings.Split(header, ",")
-	for _, part := range parts {
-		part := strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return gzip.NewWriter(writer), "gzip"
-		}
-	}
-	return writer, ""
-}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
deleted file mode 100644
index 1cf21f2174..0000000000
--- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
-	"net/http"
-	"time"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-// The RoundTripperFunc type is an adapter to allow the use of ordinary
-// functions as RoundTrippers. If f is a function with the appropriate
-// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
-type RoundTripperFunc func(req *http.Request) (*http.Response, error)
-
-// RoundTrip implements the RoundTripper interface.
-func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
-	return rt(r)
-}
-
-// InstrumentRoundTripperInFlight is a middleware that wraps the provided
-// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.RoundTripper.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
-		gauge.Inc()
-		defer gauge.Dec()
-		return next.RoundTrip(r)
-	})
-}
-
-// InstrumentRoundTripperCounter is a middleware that wraps the provided
-// http.RoundTripper to observe the request result with the provided CounterVec.
-// The CounterVec must have zero, one, or two labels. The only allowed label
-// names are "code" and "method". The function panics if any other instance
-// labels are provided. Partitioning of the CounterVec happens by HTTP status
-// code and/or HTTP method if the respective instance label names are present
-// in the CounterVec. For unpartitioned counting, use a CounterVec with
-// zero labels.
-// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(counter) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() - } - return resp, err - }) -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two labels. The only allowed label -// names are "code" and "method". The function panics if any other instance -// labels are provided. The Observe method of the Observer in the ObserverVec -// is called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(obs) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) - } - return resp, err - }) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index ac419e555f..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,505 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. 
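// Illustrative sketch (not from the vendored sources): chaining the client
// middlewares documented above around http.DefaultTransport. Metric names
// are made up; only "code" and "method" are permitted as label names.
func instrumentedClient() *http.Client {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outbound requests currently in flight.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_requests_total",
		Help: "Outbound requests partitioned by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(inFlight, counter)

	return &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport)),
	}
}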
-func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two labels. The only allowed label -// names are "code" and "method". The function panics if any other instance -// labels are provided. The Observe method of the Observer in the ObserverVec -// is called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) - }) -} - -// InstrumentHandlerCounter is a middleware that wraps the provided -// http.Handler to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two labels. The only allowed label -// names are "code" and "method". The function panics if any other instance -// labels are provided. Partitioning of the CounterVec happens by HTTP status -// code and/or HTTP method if the respective instance label names are present -// in the CounterVec. For unpartitioned counting, use a CounterVec with -// zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(counter) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() - }) -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. The ObserverVec must have zero, one, -// or two labels. The only allowed label names are "code" and "method". The -// function panics if any other instance labels are provided. The Observe -// method of the Observer in the ObserverVec is called with the request -// duration in seconds. 
Partitioning happens by HTTP status code and/or HTTP -// method if the respective instance label names are present in the -// ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) - }) - next.ServeHTTP(d, r) - }) -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. -// The ObserverVec must have zero, one, or two labels. The only allowed label -// names are "code" and "method". The function panics if any other instance -// labels are provided. The Observe method of the Observer in the ObserverVec -// is called with the request size in bytes. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) - }) -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. -// The ObserverVec must have zero, one, or two labels. The only allowed label -// names are "code" and "method". The function panics if any other instance -// labels are provided. The Observe method of the Observer in the ObserverVec -// is called with the response size in bytes. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. 
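// Illustrative sketch (not from the vendored sources): composing the server
// middlewares documented above. The metric names and the wrapped apiHandler
// are made up; a HistogramVec satisfies prometheus.ObserverVec.
func instrumentedHandler(apiHandler http.Handler) http.Handler {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	responseSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "response_size_bytes",
		Help:    "Response sizes in bytes.",
		Buckets: []float64{200, 500, 900, 1500},
	}, []string{}) // zero labels: unpartitioned observations
	prometheus.MustRegister(duration, responseSize)

	return promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerResponseSize(responseSize, apiHandler))
}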
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { - code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) - }) -} - -func checkLabels(c prometheus.Collector) (code bool, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - pm dto.Metric - ) - - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil { - return - } - if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil { - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - return - } - panic("previously set label not found – this must never happen") - } - if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil { - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString { - continue - } - if name == "code" || name == "method" { - continue - } - panic("metric partitioned with non-supported labels") - } - code = true - method = true - return - } - panic("metric partitioned with non-supported labels") -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -// If the wrapped http.Handler has not set a status code, i.e. 
the value is
-// currently 0, sanitizeCode will return 200, for consistency with behavior in
-// the stdlib.
-func sanitizeCode(s int) string {
-	switch s {
-	case 100:
-		return "100"
-	case 101:
-		return "101"
-
-	case 200, 0:
-		return "200"
-	case 201:
-		return "201"
-	case 202:
-		return "202"
-	case 203:
-		return "203"
-	case 204:
-		return "204"
-	case 205:
-		return "205"
-	case 206:
-		return "206"
-
-	case 300:
-		return "300"
-	case 301:
-		return "301"
-	case 302:
-		return "302"
-	case 304:
-		return "304"
-	case 305:
-		return "305"
-	case 307:
-		return "307"
-
-	case 400:
-		return "400"
-	case 401:
-		return "401"
-	case 402:
-		return "402"
-	case 403:
-		return "403"
-	case 404:
-		return "404"
-	case 405:
-		return "405"
-	case 406:
-		return "406"
-	case 407:
-		return "407"
-	case 408:
-		return "408"
-	case 409:
-		return "409"
-	case 410:
-		return "410"
-	case 411:
-		return "411"
-	case 412:
-		return "412"
-	case 413:
-		return "413"
-	case 414:
-		return "414"
-	case 415:
-		return "415"
-	case 416:
-		return "416"
-	case 417:
-		return "417"
-	case 418:
-		return "418"
-
-	case 500:
-		return "500"
-	case 501:
-		return "501"
-	case 502:
-		return "502"
-	case 503:
-		return "503"
-	case 504:
-		return "504"
-	case 505:
-		return "505"
-
-	case 428:
-		return "428"
-	case 429:
-		return "429"
-	case 431:
-		return "431"
-	case 511:
-		return "511"
-
-	default:
-		return strconv.Itoa(s)
-	}
-}
-
-type delegator interface {
-	Status() int
-	Written() int64
-
-	http.ResponseWriter
-}
-
-type responseWriterDelegator struct {
-	http.ResponseWriter
-
-	handler, method    string
-	status             int
-	written            int64
-	wroteHeader        bool
-	observeWriteHeader func(int)
-}
-
-func (r *responseWriterDelegator) Status() int {
-	return r.status
-}
-
-func (r *responseWriterDelegator) Written() int64 {
-	return r.written
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
-	r.status = code
-	r.wroteHeader = true
-	r.ResponseWriter.WriteHeader(code)
-	if r.observeWriteHeader != nil {
-		r.observeWriteHeader(code)
-	}
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
-	if !r.wroteHeader {
-		r.WriteHeader(http.StatusOK)
-	}
-	n, err := r.ResponseWriter.Write(b)
-	r.written += int64(n)
-	return n, err
-}
-
-type fancyDelegator struct {
-	*responseWriterDelegator
-}
-
-func (r *fancyDelegator) CloseNotify() <-chan bool {
-	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (r *fancyDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	return r.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (r *fancyDelegator) Flush() {
-	r.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (r *fancyDelegator) ReadFrom(re io.Reader) (int64, error) {
-	if !r.wroteHeader {
-		r.WriteHeader(http.StatusOK)
-	}
-	n, err := r.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
-	r.written += n
-	return n, err
-}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go
deleted file mode 100644
index 8c6b5bd8ee..0000000000
--- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ /dev/null
@@ -1,755 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"sort"
-	"sync"
-
-	"github.com/golang/protobuf/proto"
-
-	dto "github.com/prometheus/client_model/go"
-)
-
-const (
-	// Capacity for the channel to collect metrics and descriptors.
-	capMetricChan = 1000
-	capDescChan   = 10
-)
-
-// DefaultRegisterer and DefaultGatherer are the implementations of the
-// Registerer and Gatherer interface a number of convenience functions in this
-// package act on. Initially, both variables point to the same Registry, which
-// has a process collector (see NewProcessCollector) and a Go collector (see
-// NewGoCollector) already registered. This approach to keep default instances
-// as global state mirrors the approach of other packages in the Go standard
-// library. Note that there are caveats. Change the variables with caution and
-// only if you understand the consequences. Users who want to avoid global
-// state altogether should not use the convenience functions and act on custom
-// instances instead.
-var (
-	defaultRegistry              = NewRegistry()
-	DefaultRegisterer Registerer = defaultRegistry
-	DefaultGatherer   Gatherer   = defaultRegistry
-)
-
-func init() {
-	MustRegister(NewProcessCollector(os.Getpid(), ""))
-	MustRegister(NewGoCollector())
-}
-
-// NewRegistry creates a new vanilla Registry without any Collectors
-// pre-registered.
-func NewRegistry() *Registry {
-	return &Registry{
-		collectorsByID:  map[uint64]Collector{},
-		descIDs:         map[uint64]struct{}{},
-		dimHashesByName: map[string]uint64{},
-	}
-}
-
-// NewPedanticRegistry returns a registry that checks during collection if each
-// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
-//
-// Usually, a Registry will be happy as long as the union of all collected
-// Metrics is consistent and valid even if some metrics are not consistent with
-// their own Desc or a Desc provided by their registered Collector. Well-behaved
-// Collectors and Metrics will only provide consistent Descs. This Registry is
-// useful to test the implementation of Collectors and Metrics.
-func NewPedanticRegistry() *Registry {
-	r := NewRegistry()
-	r.pedanticChecksEnabled = true
-	return r
-}
-
-// Registerer is the interface for the part of a registry in charge of
-// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather than the Registry type
-// directly). In that way, they are free to use custom Registerer
-// implementations (e.g. for testing purposes).
-type Registerer interface {
-	// Register registers a new Collector to be included in metrics
-	// collection. It returns an error if the descriptors provided by the
-	// Collector are invalid or if they — in combination with descriptors of
-	// already registered Collectors — do not fulfill the consistency and
-	// uniqueness criteria described in the documentation of metric.Desc.
- // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // It is in general not safe to register the same Collector multiple - // times concurrently. - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. -func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. 
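The AlreadyRegisteredError contract described above lets callers fall back to the collector that is already registered instead of failing. A minimal sketch, assuming the usual import of github.com/prometheus/client_golang/prometheus (the counter itself is illustrative):

    reqCount := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "http_requests_total",
        Help: "Total number of HTTP requests.",
    })
    if err := prometheus.Register(reqCount); err != nil {
        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
            // An equal collector is already registered: reuse it.
            reqCount = are.ExistingCollector.(prometheus.Counter)
        } else {
            panic(err)
        }
    }
    reqCount.Inc()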
-func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. - duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer r.mtx.Unlock() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, add it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID += desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? 
- // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // Did anything happen at all? - if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") - } - if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - r.mtx.RLock() - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() - for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) - } - - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. 
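The dimHash comparison above is what rejects two collectors that share a fully-qualified name but disagree on label names or help text. A small sketch of that failure mode (metric names illustrative; assumes fmt and the prometheus package are imported):

    reg := prometheus.NewRegistry()
    a := prometheus.NewGauge(prometheus.GaugeOpts{Name: "queue_length", Help: "Current queue length."})
    b := prometheus.NewGauge(prometheus.GaugeOpts{Name: "queue_length", Help: "A different help string."})

    if err := reg.Register(a); err != nil {
        panic(err) // the first registration succeeds
    }
    if err := reg.Register(b); err != nil {
        // Same fully-qualified name, inconsistent help: dimHash mismatch.
        fmt.Println(err)
    }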
- if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - - r.mtx.RUnlock() - - // Drain metricChan in case of premature return. - defer func() { - for range metricChan { - } - }() - - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, - )) - continue - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - )) - continue - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - if r.pedanticChecksEnabled { - // Is the desc registered at all? 
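These pedantic checks are what NewPedanticRegistry enables, and they are mainly useful for vetting a custom Collector in a test. A sketch, inside a test function where t is the *testing.T and myCollector is illustrative:

    reg := prometheus.NewPedanticRegistry()
    if err := reg.Register(myCollector); err != nil {
        t.Fatal(err)
    }
    if _, err := reg.Gather(); err != nil {
        // Metrics inconsistent with their Desc, or carrying unregistered
        // Descs, are reported here instead of being silently accepted.
        t.Error(err)
    }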
- if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// metricSorter is a sortable slice of *dto.Metric. 
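Merging several registries through the Gatherers type documented above might look like this (registry names illustrative; assumes the log and prometheus packages are imported):

    appReg := prometheus.NewRegistry()
    libReg := prometheus.NewRegistry()
    // ... register collectors on both ...

    mfs, err := prometheus.Gatherers{appReg, libReg}.Gather()
    if err != nil {
        // Possibly a flattened prometheus.MultiError; duplicates were
        // skipped (first occurrence in slice order wins) and reported here.
        log.Println(err)
    }
    for _, mf := range mfs {
        log.Println(mf.GetName())
    }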
-type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// normalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, -) error { - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), - ) - } - - // Is the metric unique (i.e. no other metric with the same name and the same label values)? 
- h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - dh := hashNew() - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - sort.Sort(LabelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, - ) - } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { - return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, - ) - } - } else { - dimHashes[metricFamily.GetName()] = dh - } - metricHashes[h] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index 1c65e25ece..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. 
Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// Great fuck-up with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. 
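Typical construction per the SummaryOpts documentation above; Objectives is set explicitly because relying on the deprecated DefObjectives default is discouraged (metric name illustrative):

    latency := prometheus.NewSummary(prometheus.SummaryOpts{
        Name: "request_duration_seconds",
        Help: "Request latency distribution.",
        // Median, 90th, and 99th percentile with their absolute errors.
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
    })
    prometheus.MustRegister(latency)
    latency.Observe(0.217) // one request took 217ms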
-func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = DefObjectives - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. 
-func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in MetricVec. -// The difference is that this method returns an Observer and not a Metric so -// that no type conversion to an Observer is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Observer and not a Metric so that -// no type conversion to an Observer is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. 
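Putting the vector API together, using the WithLabelValues and With shortcuts documented here (label names and values illustrative):

    latencies := prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "request_duration_seconds",
            Help:       "Request latency, partitioned by status code and method.",
            Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
        },
        []string{"code", "method"},
    )
    latencies.WithLabelValues("404", "GET").Observe(42.21)
    latencies.With(prometheus.Labels{"code": "200", "method": "POST"}).Observe(0.042)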
By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Observer { - return m.MetricVec.WithLabelValues(lvs...).(Observer) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Observer { - return m.MetricVec.With(labels).(Observer) -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 065501d38f..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -// -// Deprecated: The Untyped type is deprecated because it doesn't make sense in -// direct instrumentation. If you need to mirror an external metric of unknown -// type (usually while writing exporters), Use MustNewConstMetric to create an -// untyped metric instance on the fly. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. - Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) - Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - *MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. 
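The deprecation note above points to MustNewConstMetric for mirroring an external metric of unknown type. A sketch of that exporter pattern (queueCollector and readQueueDepth are illustrative, not part of the removed file):

    type queueCollector struct {
        depth *prometheus.Desc
    }

    func newQueueCollector() *queueCollector {
        return &queueCollector{
            depth: prometheus.NewDesc("queue_depth", "Depth of the external work queue.", nil, nil),
        }
    }

    func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.depth
    }

    func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
        // The metric is created on the fly at collect time; readQueueDepth
        // is a hypothetical helper returning the externally observed value.
        ch <- prometheus.MustNewConstMetric(c.depth, prometheus.UntypedValue, readQueueDepth())
    }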
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index ff75ce5851..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "math" - "sort" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. 
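NewUntypedFunc, shown above, defers reading the value to collect time; since Write may be called concurrently, the supplied function must be concurrency-safe. For example (metric name illustrative; runtime.NumGoroutine is safe to call from any goroutine):

    prometheus.MustRegister(prometheus.NewUntypedFunc(
        prometheus.UntypedOpts{
            Name: "goroutines_current",
            Help: "Number of goroutines that currently exist.",
        },
        // Invoked from Write on every scrape.
        func() float64 { return float64(runtime.NumGoroutine()) },
    ))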
This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. -func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) SetToCurrentTime() { - v.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. 
NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) - return labelPairs -} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 7f3eef9a46..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. 
GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues - desc *Desc - - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling - hashAddByte func(h uint64, b byte) uint64 -} - -// newMetricVec returns an initialized MetricVec. The concrete value is -// returned for embedding into another struct. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.children { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabelValues(h, lvs), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. 
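As the note above says, keeping the returned Metric is worthwhile on hot paths because it skips the per-call hash lookup. A sketch with a CounterVec (names illustrative):

    requests := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests by code and method."},
        []string{"code", "method"},
    )

    // Resolve the child once; the error-returning form avoids the panic
    // that WithLabelValues would raise on a label-count mismatch.
    okGets, err := requests.GetMetricWithLabelValues("200", "GET")
    if err != nil {
        panic(err)
    }
    okGets.Inc() // no hash computation per increment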
-// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabels(h, labels), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - return m.deleteByHashWithLabelValues(h, lvs) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.deleteByHashWithLabels(h, labels) -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] - if !ok { - return false - } - - i := m.findMetricWithLabelValues(metrics, lvs) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. 
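Concretely, these accessors are what the typed vectors built on MetricVec expose. A short sketch using CounterVec, one of the implementations the comments name; the metric name and label values are illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// Order-based access: terse, but panics on a label-count mismatch.
	httpReqs.WithLabelValues("404", "POST").Inc()

	// Map-based access: more verbose, harder to get the order wrong.
	httpReqs.With(prometheus.Labels{"code": "200", "method": "GET"}).Inc()

	// Delete reports whether a child metric with these labels existed; a
	// deleted child stops being exported even if a handle to it is kept.
	_ = httpReqs.Delete(prometheus.Labels{"code": "404", "method": "POST"})
}
```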
If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. -func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] - if !ok { - return false - } - i := m.findMetricWithLabels(metrics, labels) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } - return true -} - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithLabelValues(hash, lvs) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabelValues(hash, lvs) - if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithLabels(hash, labels) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabels(hash, labels) - if !ok { - lvs := m.extractLabelValues(labels) - metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithLabelValues gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. 
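The getOrCreate helpers above follow the classic read-lock-then-write-lock idiom: look up under the cheap RLock, and only on a miss take the write lock and re-check before creating. The same pattern in isolation (the cache type here is a made-up stand-in for the children map):

```go
package main

import "sync"

// cache is a hypothetical stand-in guarded the same way as MetricVec.
type cache struct {
	mtx sync.RWMutex
	m   map[string]int
}

func (c *cache) getOrCreate(key string) int {
	// Fast path: most lookups hit under the cheap read lock.
	c.mtx.RLock()
	v, ok := c.m[key]
	c.mtx.RUnlock()
	if ok {
		return v
	}

	// Slow path: re-check under the write lock, because another
	// goroutine may have created the entry in the meantime.
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if v, ok := c.m[key]; ok {
		return v
	}
	c.m[key] = len(c.m)
	return c.m[key]
}

func main() {
	c := &cache{m: map[string]int{}}
	_ = c.getOrCreate("a")
}
```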
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { - for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { - for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { - return i - } - } - return len(metrics) -} - -func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { - return false - } - for i, v := range values { - if v != lvs[i] { - return false - } - } - return true -} - -func (m *MetricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { - return false - } - for i, k := range m.desc.variableLabels { - if values[i] != labels[k] { - return false - } - } - return true -} - -func (m *MetricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { - labelValues[i] = labels[k] - } - return labelValues -} diff --git a/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md b/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md deleted file mode 100644 index e8b3efa6ac..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md +++ /dev/null @@ -1,13 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Björn Rabenstein - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Björn Rabenstein -* Matt T. Proud -* Tobias Schmidt diff --git a/images/404-server/vendor/github.com/prometheus/client_model/NOTICE b/images/404-server/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410e..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index b065f8683f..0000000000 --- a/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated by protoc-gen-go. -// source: metrics.proto -// DO NOT EDIT! - -/* -Package io_prometheus_client is a generated protocol buffer package. 
- -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile 
[]*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return 
m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) -} diff --git a/images/404-server/vendor/github.com/prometheus/common/NOTICE b/images/404-server/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5e..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index a7a42d5ef4..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. 
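The decoders defined next produce exactly these generated dto types. For orientation, hand-building one family shows how the pointer fields, the proto helpers, and the nil-safe getters fit together (names and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1)},
		}},
	}
	// The generated getters are nil-safe, as defined above.
	fmt.Println(mf.GetName(), mf.GetType(), mf.GetMetric()[0].GetCounter().GetValue())
}
```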
-type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. 
If an error occurs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occured. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) - } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range 
m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". - lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. 
- lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 11839ed65c..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. 
-func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 371ac75037..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eedeefc..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
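Negotiate and NewEncoder above, together with the Format constants from expfmt.go, combine naturally in a metrics HTTP handler: pick the wire format the client asked for, then stream each family. A sketch with the registry gather step stubbed out by a hand-built family (handler path and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Content negotiation from the Accept header; falls back to FmtText.
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		enc := expfmt.NewEncoder(w, format)
		// A single hand-built family stands in for a registry gather.
		mf := &dto.MetricFamily{
			Name:   proto.String("up"),
			Type:   dto.MetricType_GAUGE.Enum(),
			Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(1)}}},
		}
		if err := enc.Encode(mf); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
	_ = http.ListenAndServe(":8080", nil)
}
```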
- -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. -func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index f11321cd0c..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "strings" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - - // Fail-fast checks. - if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Comments, first HELP, then TYPE. - if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) - written += n - if err != nil { - return written, err - } - } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) - written += n - if err != nil { - return written, err - } - - // Finally the samples, one line for each. 
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Counter.GetValue(), - out, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Gauge.GetValue(), - out, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Untyped.GetValue(), - out, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), - q.GetValue(), - out, - ) - written += n - if err != nil { - return written, err - } - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Summary.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Summary.GetSampleCount()), - out, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, q := range metric.Histogram.Bucket { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, - ) - written += n - if err != nil { - return written, err - } - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", - float64(metric.Histogram.GetSampleCount()), - out, - ) - if err != nil { - return written, err - } - written += n - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Histogram.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Histogram.GetSampleCount()), - out, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeSample writes a single sample in text format to out, given the metric -// name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. 
-func writeSample( - name string, - metric *dto.Metric, - additionalLabelName, additionalLabelValue string, - value float64, - out io.Writer, -) (int, error) { - var written int - n, err := fmt.Fprint(out, name) - written += n - if err != nil { - return written, err - } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - n, err = out.Write([]byte{'\n'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// labelPairsToText converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( - in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var written int - separator := '{' - for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) - written += n - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) - written += n - if err != nil { - return written, err - } - } - n, err := out.Write([]byte{'}'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { - if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) - } - - return escape.Replace(v) -} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index ef9a150771..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
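MetricFamilyToText and its writeSample/labelPairsToText helpers above produce the text exposition format end to end. Fed a small hand-built family, the output is the familiar # HELP/# TYPE comments followed by one sample per line:

```go
package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label:   []*dto.LabelPair{{Name: proto.String("code"), Value: proto.String("200")}},
			Counter: &dto.Counter{Value: proto.Float64(7)},
		}},
	}

	// Writes:
	//   # HELP http_requests_total Total HTTP requests.
	//   # TYPE http_requests_total counter
	//   http_requests_total{code="200"} 7
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```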
- -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. 
If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) - if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. 
- return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. 
- }
- value, err := strconv.ParseFloat(p.currentToken.String(), 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
- return nil
- }
- switch p.currentMF.GetType() {
- case dto.MetricType_COUNTER:
- p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
- case dto.MetricType_GAUGE:
- p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
- case dto.MetricType_UNTYPED:
- p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
- case dto.MetricType_SUMMARY:
- // *sigh*
- if p.currentMetric.Summary == nil {
- p.currentMetric.Summary = &dto.Summary{}
- }
- switch {
- case p.currentIsSummaryCount:
- p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsSummarySum:
- p.currentMetric.Summary.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentQuantile):
- p.currentMetric.Summary.Quantile = append(
- p.currentMetric.Summary.Quantile,
- &dto.Quantile{
- Quantile: proto.Float64(p.currentQuantile),
- Value: proto.Float64(value),
- },
- )
- }
- case dto.MetricType_HISTOGRAM:
- // *sigh*
- if p.currentMetric.Histogram == nil {
- p.currentMetric.Histogram = &dto.Histogram{}
- }
- switch {
- case p.currentIsHistogramCount:
- p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsHistogramSum:
- p.currentMetric.Histogram.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentBucket):
- p.currentMetric.Histogram.Bucket = append(
- p.currentMetric.Histogram.Bucket,
- &dto.Bucket{
- UpperBound: proto.Float64(p.currentBucket),
- CumulativeCount: proto.Uint64(uint64(value)),
- },
- )
- }
- default:
- p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- return p.startTimestamp
-}
-
-// startTimestamp represents the state where the next byte read from p.buf is
-// the start of the timestamp (or whitespace leading up to it).
-func (p *TextParser) startTimestamp() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
- return nil
- }
- p.currentMetric.TimestampMs = proto.Int64(timestamp)
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() > 0 {
- p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
- return nil
- }
- return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
- if p.currentMF.Help != nil {
- p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the docstring.
- if p.readTokenUntilNewline(true); p.err != nil {
- return nil // Unexpected end of input.
- }
- p.currentMF.Help = proto.String(p.currentToken.String())
- return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
- if p.currentMF.Type != nil {
- p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the type.
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
- if !ok {
- p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
- return nil
- }
- p.currentMF.Type = dto.MetricType(metricType).Enum()
- return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
- p.err = ParseError{
- Line: p.lineCount,
- Msg: msg,
- }
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
- return
- }
- }
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
- if isBlankOrTab(p.currentByte) {
- p.skipBlankTab()
- }
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
- p.currentToken.Reset()
- for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
- p.currentToken.Reset()
- escaped := false
- for p.err == nil {
- if recognizeEscapeSequence && escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '\n':
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git a/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
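
The hunk above deletes the vendored copy of the expfmt text parser from prometheus/common. A minimal sketch of how that state machine is driven in practice, assuming the package's usual entry point TextParser.TextToMetricFamilies (defined earlier in the deleted file, outside this hunk):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Prometheus text exposition format: HELP and TYPE comment lines
	// followed by a sample with labels and a millisecond timestamp.
	in := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="get"} 1027 1395066363000
`
	var parser expfmt.TextParser
	// TextToMetricFamilies runs the startOfLine/readingMetricName/...
	// state machine until EOF and returns one MetricFamily per name.
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, %d metric(s)\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}
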
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb65..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/alert.go b/images/404-server/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c7ad..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
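
The autoneg.go hunk above carried goautoneg's whole public surface: ParseAccept and Negotiate. A minimal usage sketch against that deleted API:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json"

	// ParseAccept sorts clauses by descending q-value, so
	// application/json (implicit q=1.0) comes first.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}

	// Negotiate walks the sorted clauses and returns the first
	// alternative that matches: application/json here.
	ct := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(ct) // application/json
}
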
-
-package model
-
-import (
- "fmt"
- "time"
-)
-
-type AlertStatus string
-
-const (
- AlertFiring AlertStatus = "firing"
- AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
- // Label value pairs for purpose of aggregation, matching, and disposition
- // dispatching. This must minimally include an "alertname" label.
- Labels LabelSet `json:"labels"`
-
- // Extra key/value information which does not define alert identity.
- Annotations LabelSet `json:"annotations"`
-
- // The known time range for this alert. Both ends are optional.
- StartsAt time.Time `json:"startsAt,omitempty"`
- EndsAt time.Time `json:"endsAt,omitempty"`
- GeneratorURL string `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
- return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
- return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
- s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
- if a.Resolved() {
- return s + "[resolved]"
- }
- return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
- return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
- if a.EndsAt.IsZero() {
- return false
- }
- return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
- return AlertResolved
- }
- return AlertFiring
-}
-
-// Validate returns an error if the alert data is inconsistent.
-func (a *Alert) Validate() error {
- if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
- }
- if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
- }
- if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
- }
- return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
- if as[i].StartsAt.Before(as[j].StartsAt) {
- return true
- }
- if as[i].EndsAt.Before(as[j].EndsAt) {
- return true
- }
- return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
- for _, a := range as {
- if !a.Resolved() {
- return true
- }
- }
- return false
-}
-
-// Status returns AlertFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go b/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106e..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. 
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
- myLength, otherLength := len(s), len(o)
- if myLength == 0 || otherLength == 0 {
- return FingerprintSet{}
- }
-
- subSet := s
- superSet := o
-
- if otherLength < myLength {
- subSet = o
- superSet = s
- }
-
- out := FingerprintSet{}
-
- for k := range subSet {
- if _, ok := superSet[k]; ok {
- out[k] = struct{}{}
- }
- }
-
- return out
-}
diff --git a/images/404-server/vendor/github.com/prometheus/common/model/fnv.go b/images/404-server/vendor/github.com/prometheus/common/model/fnv.go
deleted file mode 100644
index 038fc1c900..0000000000
--- a/images/404-server/vendor/github.com/prometheus/common/model/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
diff --git a/images/404-server/vendor/github.com/prometheus/common/model/labels.go b/images/404-server/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index 41051a01a3..0000000000
--- a/images/404-server/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-const (
- // AlertNameLabel is the name of the label containing an alert's name.
- AlertNameLabel = "alertname"
-
- // ExportedLabelPrefix is the prefix to prepend to the label names present in
- // exported metrics if a label of the same name is added by the server.
- ExportedLabelPrefix = "exported_"
-
- // MetricNameLabel is the label name indicating the metric name of a
- // timeseries.
- MetricNameLabel = "__name__"
-
- // SchemeLabel is the name of the label that holds the scheme on which to
- // scrape a target.
- SchemeLabel = "__scheme__"
-
- // AddressLabel is the name of the label that holds the address of
- // a scrape target.
- AddressLabel = "__address__"
-
- // MetricsPathLabel is the name of the label that holds the path on which to
- // scrape a target.
- MetricsPathLabel = "__metrics_path__"
-
- // ReservedLabelPrefix is a prefix which is not legal in user-supplied
- // label names.
- ReservedLabelPrefix = "__"
-
- // MetaLabelPrefix is a prefix for labels that provide meta information.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series.
- MetaLabelPrefix = "__meta_"
-
- // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series. This is reserved for use in
- // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_"
-
- // ParamLabelPrefix is a prefix for labels that provide URL parameters
- // used to scrape a target.
- ParamLabelPrefix = "__param_"
-
- // JobLabel is the label name indicating the job from which a timeseries
- // was scraped.
- JobLabel = "job"
-
- // InstanceLabel is the label name used for the instance label.
- InstanceLabel = "instance"
-
- // BucketLabel is used for the label that defines the upper bound of a
- // bucket of a histogram ("le" -> "less or equal").
- BucketLabel = "le"
-
- // QuantileLabel is used for the label that defines the quantile in a
- // summary.
- QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. -type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/labelset.go b/images/404-server/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a739..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. 
-func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/metric.go b/images/404-server/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index f7250909b9..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
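
The labelset.go hunk above defines merging, ordering, and fingerprinting for label sets. A minimal sketch of Merge, Before, and Fingerprint, assuming the upstream (non-vendored) import path:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "ingress", "instance": "a"}
	extra := model.LabelSet{"instance": "b"}

	// Merge is non-destructive; values in the argument win on conflict.
	merged := base.Merge(extra)
	fmt.Println(merged) // {instance="b", job="ingress"}

	// Before gives a total order: fewer labels sort first, then the
	// first differing name/value pair decides.
	fmt.Println(extra.Before(base)) // true, extra has fewer labels

	// Fingerprint hashes the sorted name/value pairs (FNV-1a based).
	fmt.Println(base.Fingerprint())
}
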
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - separator = []byte{0} - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/model.go b/images/404-server/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b9691707..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
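
The metric.go hunk above is what gives a Metric its canonical string form and validates metric names. A minimal sketch, again assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "get",
		"code":                "200",
	}
	// String() pulls out __name__ and sorts the remaining labels:
	// http_requests_total{code="200", method="get"}
	fmt.Println(m)

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("1_bad_name"))          // false
}
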
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/images/404-server/vendor/github.com/prometheus/common/model/signature.go b/images/404-server/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13c63..0000000000 --- a/images/404-server/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. 
Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
diff --git a/images/404-server/vendor/github.com/prometheus/common/model/silence.go b/images/404-server/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index 7538e29977..0000000000
--- a/images/404-server/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "time"
-)
-
-// Matcher describes a matcher that matches the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate returns an error if any field of the matcher has an invalid value.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition
-// in the Prometheus eco-system.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
-// Validate returns an error if any field of the silence has an invalid value.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
- }
- }
- if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
- }
- if s.Comment == "" {
- return fmt.Errorf("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
- }
- return nil
-}
diff --git a/images/404-server/vendor/github.com/prometheus/common/model/time.go b/images/404-server/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 548968aebe..0000000000
--- a/images/404-server/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "math"
- "regexp"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- // MinimumTick is the minimum supported time resolution. This has to be
- // at least time.Second in order for the code below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
- return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
- return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
- return Time(t / nanosPerTick)
-}
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
- return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
- return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
- return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
- return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration {
- return time.Duration(t-o) * minimumTick
-}
-
-// Time returns the time.Time representation of t.
-func (t Time) Time() time.Time {
- return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) Unix() int64 {
- return int64(t) / second
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) UnixNano() int64 {
- return int64(t) * nanosPerTick
-}
-
-// The number of digits after the dot.
-var dotPrecision = int(math.Log10(float64(second)))
-
-// String returns a string representation of the Time.
-func (t Time) String() string {
- return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (t Time) MarshalJSON() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (t *Time) UnmarshalJSON(b []byte) error {
- p := strings.Split(string(b), ".")
- switch len(p) {
- case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- *t = Time(v * second)
-
- case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- v *= second
-
- prec := dotPrecision - len(p[1])
- if prec < 0 {
- p[1] = p[1][:dotPrecision]
- } else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
- }
-
- va, err := strconv.ParseInt(p[1], 10, 32)
- if err != nil {
- return err
- }
-
- *t = Time(v + va)
-
- default:
- return fmt.Errorf("invalid time %q", string(b))
- }
- return nil
-}
-
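The Time methods deleted above implement millisecond-resolution timestamps. A minimal round-trip sketch, assuming the upstream import path:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// model.Time counts milliseconds since the epoch.
	t := model.TimeFromUnix(1395066363)
	fmt.Println(t) // 1395066363 (whole seconds print without a dot)

	// Add works in time.Duration units but truncates to milliseconds.
	later := t.Add(1500 * time.Millisecond)
	fmt.Println(later.Sub(t)) // 1.5s

	// Time() converts back to a standard library time.Time.
	fmt.Println(later.Time().UTC())
}
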
-// Duration wraps time.Duration. It is used to parse the custom duration format
-// from YAML.
-// This type should not propagate beyond the scope of input/output processing.
-type Duration time.Duration
-
-var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
-
-// ParseDuration parses a string into a Duration, assuming that a year
-// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- matches := durationRE.FindStringSubmatch(durationStr)
- if len(matches) != 3 {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var (
- n, _ = strconv.Atoi(matches[1])
- dur = time.Duration(n) * time.Millisecond
- )
- switch unit := matches[2]; unit {
- case "y":
- dur *= 1000 * 60 * 60 * 24 * 365
- case "w":
- dur *= 1000 * 60 * 60 * 24 * 7
- case "d":
- dur *= 1000 * 60 * 60 * 24
- case "h":
- dur *= 1000 * 60 * 60
- case "m":
- dur *= 1000 * 60
- case "s":
- dur *= 1000
- case "ms":
- // Value already correct
- default:
- return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
- }
- return Duration(dur), nil
-}
-
-func (d Duration) String() string {
- var (
- ms = int64(time.Duration(d) / time.Millisecond)
- unit = "ms"
- )
- factors := map[string]int64{
- "y": 1000 * 60 * 60 * 24 * 365,
- "w": 1000 * 60 * 60 * 24 * 7,
- "d": 1000 * 60 * 60 * 24,
- "h": 1000 * 60 * 60,
- "m": 1000 * 60,
- "s": 1000,
- "ms": 1,
- }
-
- switch int64(0) {
- case ms % factors["y"]:
- unit = "y"
- case ms % factors["w"]:
- unit = "w"
- case ms % factors["d"]:
- unit = "d"
- case ms % factors["h"]:
- unit = "h"
- case ms % factors["m"]:
- unit = "m"
- case ms % factors["s"]:
- unit = "s"
- }
- return fmt.Sprintf("%v%v", ms/factors[unit], unit)
-}
-
-// MarshalYAML implements the yaml.Marshaler interface.
-func (d Duration) MarshalYAML() (interface{}, error) {
- return d.String(), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
diff --git a/images/404-server/vendor/github.com/prometheus/common/model/value.go b/images/404-server/vendor/github.com/prometheus/common/model/value.go
deleted file mode 100644
index c9ed3ffd82..0000000000
--- a/images/404-server/vendor/github.com/prometheus/common/model/value.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "sort"
- "strconv"
- "strings"
-)
-
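ParseDuration and Duration.String above implement the single-unit y/w/d/h/m/s/ms format used in Prometheus configuration files. A minimal sketch:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Only one integer plus one unit is accepted ("1h30m" is not).
	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 90m: String() picks the largest unit that divides evenly

	d2, _ := model.ParseDuration("1w")
	fmt.Println(d2) // 1w, i.e. 7 * 24h
}
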
- ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// sematics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. 
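Two wire-format choices in the deleted value.go are worth calling out: sample values marshal as quoted strings (so NaN and the infinities survive JSON, which has no literals for them), and a SamplePair marshals as a two-element array of [timestamp, "value"]. A simplified sketch of that pair encoding, using plain float64/string fields and illustrative names instead of the vendored SampleValue/Time types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pair mimics the removed SamplePair wire format: a two-element JSON array
// of [unix_seconds_with_fraction, "value_as_string"].
type pair struct {
	Timestamp float64
	Value     string
}

func (p pair) MarshalJSON() ([]byte, error) {
	return json.Marshal([2]interface{}{p.Timestamp, p.Value})
}

func (p *pair) UnmarshalJSON(b []byte) error {
	var v [2]interface{}
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	var ok bool
	if p.Timestamp, ok = v[0].(float64); !ok {
		return fmt.Errorf("timestamp is not a number: %v", v[0])
	}
	if p.Value, ok = v[1].(string); !ok {
		return fmt.Errorf("value is not a string: %v", v[1])
	}
	return nil
}

func main() {
	b, _ := json.Marshal(pair{Timestamp: 1435781451.781, Value: "21.5"})
	fmt.Println(string(b)) // [1435781451.781,"21.5"]

	var p pair
	_ = json.Unmarshal([]byte(`[1435781451.781,"21.5"]`), &p)
	fmt.Printf("%+v\n", p)
}
```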
-func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. 
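The Value interface and ValueType enum above exist so a caller can decode a query result in two phases: read the type tag first, then unmarshal the payload into the matching shape (Scalar, Vector, Matrix, String). A sketch of that dispatch pattern; the `resultType`/`result` envelope shown here is how the Prometheus HTTP API wraps these values, which is an assumption beyond this file:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// queryResult defers decoding of Result until the type tag is known,
// the same two-phase decode the removed ValueType enables.
type queryResult struct {
	ResultType string          `json:"resultType"`
	Result     json.RawMessage `json:"result"`
}

func main() {
	raw := []byte(`{"resultType":"scalar","result":[1435781451.781,"1"]}`)

	var qr queryResult
	if err := json.Unmarshal(raw, &qr); err != nil {
		panic(err)
	}

	switch qr.ResultType {
	case "scalar":
		var v [2]interface{} // [timestamp, "value"]
		_ = json.Unmarshal(qr.Result, &v)
		fmt.Println("scalar:", v[1], "@", v[0])
	case "vector", "matrix", "string":
		fmt.Println("would decode", qr.ResultType, "here")
	default:
		fmt.Println("unknown value type:", qr.ResultType)
	}
}
```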
-func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. -type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml b/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index a9e28bf5d1..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -sudo: false -language: go -go: - - 1.6.4 - - 1.7.4 diff --git a/images/404-server/vendor/github.com/prometheus/procfs/Makefile b/images/404-server/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index c264a49d17..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ci: - ! gofmt -l *.go | read nothing - go vet - go test -v ./... 
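Samples.Less and Vector.Less above both impose the same two-key ordering: compare metrics first and fall back to timestamps only on a tie. The cascading-switch form they use is equivalent to this more conventional sketch (stand-in types, not the vendored ones):

```go
package main

import (
	"fmt"
	"sort"
)

// sample is a stand-in for the removed *Sample: a metric name plus timestamp.
type sample struct {
	Metric    string
	Timestamp int64
}

// byMetricThenTime reproduces the two-key ordering of Samples.Less and
// Vector.Less: metrics first, timestamps as the tie-breaker.
type byMetricThenTime []sample

func (s byMetricThenTime) Len() int      { return len(s) }
func (s byMetricThenTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byMetricThenTime) Less(i, j int) bool {
	if s[i].Metric != s[j].Metric {
		return s[i].Metric < s[j].Metric
	}
	return s[i].Timestamp < s[j].Timestamp
}

func main() {
	v := byMetricThenTime{{"up", 2}, {"http_requests_total", 9}, {"up", 1}}
	sort.Sort(v)
	fmt.Println(v) // [{http_requests_total 9} {up 1} {up 2}]
}
```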
- go get github.com/golang/lint/golint - golint *.go diff --git a/images/404-server/vendor/github.com/prometheus/procfs/fs.go b/images/404-server/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 17546756b3..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,46 +0,0 @@ -package procfs - -import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/xfs" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} - -// XFSStats retrieves XFS filesystem runtime statistics. -func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go b/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index b4e31d7ba3..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,55 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "os" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { - pio := ProcIO{} - - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - - return pio, nil -} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/stat.go b/images/404-server/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 1ca217e8c7..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,56 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// Stat represents kernel/system statistics. 
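The deleted proc_io.go parses the entire /proc/&lt;pid&gt;/io payload with a single fmt.Sscanf call, relying on the rule that newlines in a Scanf-family format string must match newlines in the input. A runnable re-run of that trick over canned data, so it works on any platform:

```go
package main

import "fmt"

// The whole key: value block is matched against one format string, exactly
// as the removed NewIO did; a mismatch anywhere surfaces as an Sscanf error.
func main() {
	data := "rchar: 100\nwchar: 20\nsyscr: 3\nsyscw: 1\n" +
		"read_bytes: 4096\nwrite_bytes: 0\ncancelled_write_bytes: 0\n"

	var rchar, wchar, syscr, syscw, readBytes, writeBytes uint64
	var cancelled int64 // signed, since truncation can make it negative

	format := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
		"read_bytes: %d\nwrite_bytes: %d\ncancelled_write_bytes: %d\n"

	n, err := fmt.Sscanf(data, format, &rchar, &wchar, &syscr, &syscw,
		&readBytes, &writeBytes, &cancelled)
	fmt.Println(n, err, rchar, readBytes, cancelled) // 7 <nil> 100 4096 0
}
```

The upside is brevity; the downside, as the field order is baked into the format string, is that any new line the kernel adds between existing ones breaks the parse.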
-type Stat struct { - // Boot time in seconds since the Epoch. - BootTime int64 -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// NewStat returns an information about current kernel/system statistics. -func (fs FS) NewStat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { - continue - } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) - } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil - } - if err := s.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) -} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go b/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index c8f6279f39..0000000000 --- a/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. - fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := parseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. 
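The deleted NewStat scans /proc/stat line by line and extracts exactly one key, btime. The same scan-for-one-key pattern as a standalone function, run over a canned excerpt so the example is portable:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// bootTime walks the stat payload until it finds "btime <seconds>",
// mirroring the removed fs.NewStat loop.
func bootTime(stat string) (int64, error) {
	s := bufio.NewScanner(strings.NewReader(stat))
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) == 2 && fields[0] == "btime" {
			return strconv.ParseInt(fields[1], 10, 64)
		}
	}
	if err := s.Err(); err != nil {
		return 0, err
	}
	return 0, fmt.Errorf("missing btime")
}

func main() {
	bt, err := bootTime("cpu  2255 34 2290 22625563\nbtime 1446533102\nprocesses 26442\n")
	fmt.Println(bt, err) // 1446533102 <nil>
}
```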
- us, err := parseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. -func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. -func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
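Every per-label builder above follows the same shape: assert an exact field count, then map positional uint32s onto named struct fields. A compact sketch of that validate-then-build pattern, with a hypothetical `want` helper standing in for the parseUint32s-plus-length-check pairing:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// want converts fields to uint32s and enforces the exact count, the same
// guard each *Stats constructor applies before building its struct.
func want(n int, fields []string) ([]uint32, error) {
	if len(fields) != n {
		return nil, fmt.Errorf("incorrect number of values: got %d, want %d", len(fields), n)
	}
	us := make([]uint32, 0, n)
	for _, f := range fields {
		u, err := strconv.ParseUint(f, 10, 32)
		if err != nil {
			return nil, err
		}
		us = append(us, uint32(u))
	}
	return us, nil
}

// btree mirrors the four-counter layout btreeStats expects for "abt" lines.
type btree struct{ Lookups, Compares, Inserted, Deleted uint32 }

func main() {
	us, err := want(4, strings.Fields("705 17884 9 9"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", btree{us[0], us[1], us[2], us[3]})
}
```

Failing fast on an unexpected count is what keeps a kernel-version change from silently shifting every counter by one position.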
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. -func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
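vnodeStats below is the one builder that tolerates two field counts, because older XFS stats versions omit the trailing "Free" counter. A distilled sketch of that optional-trailing-field handling, with a stand-in struct:

```go
package main

import "fmt"

// vnode shows the 7-or-8-field handling vnodeStats uses: older kernels emit
// seven counters, newer ones append Free as an eighth.
type vnode struct {
	Active, Allocate, Get, Hold, Release, Reclaim, Remove uint32
	Free                                                  uint32 // zero value stands in when absent
}

func parseVnode(us []uint32) (vnode, error) {
	if len(us) != 7 && len(us) != 8 {
		return vnode{}, fmt.Errorf("incorrect number of values for vnode stats: %d", len(us))
	}
	v := vnode{us[0], us[1], us[2], us[3], us[4], us[5], us[6], 0}
	if len(us) == 8 {
		v.Free = us[7]
	}
	return v, nil
}

func main() {
	old, _ := parseVnode([]uint32{1, 2, 3, 4, 5, 6, 7})
	cur, _ := parseVnode([]uint32{1, 2, 3, 4, 5, 6, 7, 8})
	fmt.Printf("%+v\n%+v\n", old, cur)
}
```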
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} - -// parseUint32s parses a slice of strings into a slice of uint32s. -func parseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// parseUint64s parses a slice of strings into a slice of uint64s. -func parseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} diff --git a/images/custom-error-pages/Makefile b/images/custom-error-pages/Makefile index 11be00530f..c816add8c2 100644 --- a/images/custom-error-pages/Makefile +++ b/images/custom-error-pages/Makefile @@ -1,16 +1,16 @@ -all: push +all: all-container BUILDTAGS= # Use the 0.0 tag for testing, it shouldn't clobber any release builds -TAG?=0.1 +TAG?=0.4 REGISTRY?=quay.io/kubernetes-ingress-controller GOOS?=linux -DOCKER?=gcloud docker -- +DOCKER?=docker SED_I?=sed -i GOHOSTOS ?= $(shell go env GOHOSTOS) -PKG=k8s.io/ingress/images/custom-error-pages +PKG=k8s.io/ingress-nginx/images/custom-error-pages ifeq ($(GOHOSTOS),darwin) SED_I=sed -i '' @@ -26,11 +26,11 @@ ARCH ?= $(shell go env GOARCH) GOARCH = ${ARCH} DUMB_ARCH = ${ARCH} -BASEIMAGE?=alpine:3.6 +BASEIMAGE?=alpine:3.9 ALL_ARCH = amd64 arm arm64 ppc64le -QEMUVERSION=v2.9.1 +QEMUVERSION=v3.0.0 IMGNAME = custom-error-pages IMAGE = $(REGISTRY)/$(IMGNAME) @@ -52,8 +52,6 @@ TEMP_DIR := $(shell mktemp -d) DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile -all: all-container - sub-container-%: $(MAKE) ARCH=$* build container @@ -76,7 +74,7 @@ ifeq ($(ARCH),amd64) else # When cross-building, only the placeholder "CROSS_BUILD_" should be removed # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel - $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + # $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) endif @@ -105,3 +103,8 @@ build: clean release: all-container all-push echo "done" + +.PHONY: register-qemu +register-qemu: + # Register /usr/bin/qemu-ARCH-static as the handler for binaries in multiple platforms + $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset diff --git a/images/custom-error-pages/README.md b/images/custom-error-pages/README.md index 3ee67e5d91..88983591f7 100644 --- a/images/custom-error-pages/README.md +++ b/images/custom-error-pages/README.md @@ -1,2 +1,3 @@ +# custom-error-pages Example of Custom error pages for the NGINX Ingress controller diff --git a/images/custom-error-pages/main.go b/images/custom-error-pages/main.go index 88b709b6c0..7e3f683b18 100644 --- a/images/custom-error-pages/main.go +++ b/images/custom-error-pages/main.go @@ -24,39 +24,91 @@ import ( "net/http" "os" "strconv" + "strings" + "time" + + 
"github.com/prometheus/client_golang/prometheus/promhttp" ) const ( + // FormatHeader name of the header used to extract the format FormatHeader = "X-Format" + // CodeHeader name of the header used as source of the HTTP status code to return CodeHeader = "X-Code" + // ContentType name of the header that defines the format of the reply ContentType = "Content-Type" + + // OriginalURI name of the header with the original URL from NGINX + OriginalURI = "X-Original-URI" + + // Namespace name of the header that contains information about the Ingress namespace + Namespace = "X-Namespace" + + // IngressName name of the header that contains the matched Ingress + IngressName = "X-Ingress-Name" + + // ServiceName name of the header that contains the matched Service in the Ingress + ServiceName = "X-Service-Name" + + // ServicePort name of the header that contains the matched Service port in the Ingress + ServicePort = "X-Service-Port" + + // RequestId is a unique ID that identifies the request - same as for backend service + RequestId = "X-Request-ID" + + // ErrFilesPathVar is the name of the environment variable indicating + // the location on disk of files served by the handler. + ErrFilesPathVar = "ERROR_FILES_PATH" ) func main() { - path := "/www" - if os.Getenv("PATH") != "" { - path = os.Getenv("PATH") + errFilesPath := "/www" + if os.Getenv(ErrFilesPathVar) != "" { + errFilesPath = os.Getenv(ErrFilesPathVar) } - http.HandleFunc("/", errorHandler(path)) + + http.HandleFunc("/", errorHandler(errFilesPath)) + + http.Handle("/metrics", promhttp.Handler()) + + http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + http.ListenAndServe(fmt.Sprintf(":8080"), nil) } func errorHandler(path string) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { + start := time.Now() ext := "html" + if os.Getenv("DEBUG") != "" { + w.Header().Set(FormatHeader, r.Header.Get(FormatHeader)) + w.Header().Set(CodeHeader, r.Header.Get(CodeHeader)) + w.Header().Set(ContentType, r.Header.Get(ContentType)) + w.Header().Set(OriginalURI, r.Header.Get(OriginalURI)) + w.Header().Set(Namespace, r.Header.Get(Namespace)) + w.Header().Set(IngressName, r.Header.Get(IngressName)) + w.Header().Set(ServiceName, r.Header.Get(ServiceName)) + w.Header().Set(ServicePort, r.Header.Get(ServicePort)) + w.Header().Set(RequestId, r.Header.Get(RequestId)) + } + format := r.Header.Get(FormatHeader) if format == "" { format = "text/html" - log.Printf("forma not specified. Using %v\n", format) + log.Printf("format not specified. Using %v", format) } - mediaType, _, _ := mime.ParseMediaType(format) - cext, err := mime.ExtensionsByType(mediaType) + cext, err := mime.ExtensionsByType(format) if err != nil { - log.Printf("unexpected error reading media type extension: %v. Using %v\n", err, ext) + log.Printf("unexpected error reading media type extension: %v. Using %v", err, ext) + format = "text/html" + } else if len(cext) == 0 { + log.Printf("couldn't get media type extension. Using %v", ext) } else { ext = cext[0] } @@ -66,29 +118,40 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) { code, err := strconv.Atoi(errCode) if err != nil { code = 404 - log.Printf("unexpected error reading return code: %v. Using %v\n", err, code) + log.Printf("unexpected error reading return code: %v. Using %v", err, code) } w.WriteHeader(code) + if !strings.HasPrefix(ext, ".") { + ext = "." 
+ ext + } file := fmt.Sprintf("%v/%v%v", path, code, ext) f, err := os.Open(file) if err != nil { - log.Printf("unexpected error opening file: %v\n", err) + log.Printf("unexpected error opening file: %v", err) scode := strconv.Itoa(code) file := fmt.Sprintf("%v/%cxx%v", path, scode[0], ext) f, err := os.Open(file) if err != nil { - log.Printf("unexpected error opening file: %v\n", err) + log.Printf("unexpected error opening file: %v", err) http.NotFound(w, r) return } defer f.Close() - log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) + log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file) io.Copy(w, f) return } defer f.Close() - log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) + log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file) io.Copy(w, f) + + duration := time.Now().Sub(start).Seconds() + + proto := strconv.Itoa(r.ProtoMajor) + proto = fmt.Sprintf("%s.%s", proto, strconv.Itoa(r.ProtoMinor)) + + requestCount.WithLabelValues(proto).Inc() + requestDuration.WithLabelValues(proto).Observe(duration) } } diff --git a/images/404-server/metrics.go b/images/custom-error-pages/metrics.go similarity index 100% rename from images/404-server/metrics.go rename to images/custom-error-pages/metrics.go diff --git a/images/custom-error-pages/rootfs/etc/mime.types b/images/custom-error-pages/rootfs/etc/mime.types new file mode 100644 index 0000000000..4c5804d738 --- /dev/null +++ b/images/custom-error-pages/rootfs/etc/mime.types @@ -0,0 +1,1830 @@ +# extracted from mailcap-2.1.48.tar.xz @ https://releases.pagure.org/mailcap/ + +# This is a comment. I love comments. -*- indent-tabs-mode: t -*- + +# This file controls what Internet media types are sent to the client for +# given file extension(s). Sending the correct media type to the client +# is important so they know how to handle the content of the file. +# Extra types can either be added here or by using an AddType directive +# in your config files. For more information about Internet media types, +# please read RFC 2045, 2046, 2047, 2048, and 2077. The Internet media type +# registry is at . 
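The reworked errorHandler in images/custom-error-pages/main.go above resolves a response body in two steps: try the exact page for the status code, then fall back to the class page (503.html, then 5xx.html), and finally 404 if neither exists. A trimmed, runnable sketch of just that lookup-and-fallback flow, omitting the DEBUG header echo and the Prometheus counters the real handler also wires in:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
)

// serveErrorPage mirrors the fallback order of the patched errorHandler:
// the exact code page first (e.g. /www/503.html), then the class page
// (/www/5xx.html), then a plain 404.
func serveErrorPage(w http.ResponseWriter, r *http.Request, path string, code int, ext string) {
	scode := strconv.Itoa(code)
	for _, file := range []string{
		fmt.Sprintf("%v/%v%v", path, code, ext),
		fmt.Sprintf("%v/%cxx%v", path, scode[0], ext),
	} {
		if f, err := os.Open(file); err == nil {
			defer f.Close()
			w.WriteHeader(code)
			io.Copy(w, f)
			return
		}
	}
	http.NotFound(w, r)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// X-Code is the header NGINX sets with the upstream status;
		// like the real handler, default to 404 when it is missing.
		code, err := strconv.Atoi(r.Header.Get("X-Code"))
		if err != nil {
			code = 404
		}
		serveErrorPage(w, r, "/www", code, ".html")
	})
	http.ListenAndServe(":8080", nil)
}
```

The class-page fallback is what lets one 5xx.html cover every server error without shipping a file per status code.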
+ +# IANA types + +# MIME type Extensions +application/1d-interleaved-parityfec +application/3gpdash-qoe-report+xml +application/3gpp-ims+xml +application/A2L a2l +application/activemessage +application/alto-costmap+json +application/alto-costmapfilter+json +application/alto-directory+json +application/alto-endpointcost+json +application/alto-endpointcostparams+json +application/alto-endpointprop+json +application/alto-endpointpropparams+json +application/alto-error+json +application/alto-networkmap+json +application/alto-networkmapfilter+json +application/AML aml +application/andrew-inset ez +application/applefile +application/ATF atf +application/ATFX atfx +application/ATXML atxml +application/atom+xml atom +application/atomcat+xml atomcat +application/atomdeleted+xml atomdeleted +application/atomicmail +application/atomsvc+xml atomsvc +application/auth-policy+xml apxml +application/bacnet-xdd+zip xdd +application/batch-SMTP +application/beep+xml +application/calendar+json +application/calendar+xml xcs +application/call-completion +application/cals-1840 +application/cbor cbor +application/ccmp+xml ccmp +application/ccxml+xml ccxml +application/CDFX+XML cdfx +application/cdmi-capability cdmia +application/cdmi-container cdmic +application/cdmi-domain cdmid +application/cdmi-object cdmio +application/cdmi-queue cdmiq +application/cdni +application/CEA cea +application/cea-2018+xml +application/cellml+xml cellml cml +application/cfw +application/clue_info+xml clue +application/cms cmsc +application/cnrp+xml +application/coap-group+json +application/coap-payload +application/commonground +application/conference-info+xml +application/cpl+xml cpl +application/cose +application/cose-key +application/cose-key-set +application/csrattrs csrattrs +application/csta+xml +application/CSTAdata+xml +application/csvm+json +application/cybercash +application/dash+xml mpd +application/dashdelta mpdd +application/davmount+xml davmount +application/dca-rft +application/DCD dcd +application/dec-dx +application/dialog-info+xml +application/dicom dcm +application/dicom+json +application/dicom+xml +application/DII dii +application/DIT dit +application/dns +application/dskpp+xml xmls +application/dssc+der dssc +application/dssc+xml xdssc +application/dvcs dvc +application/ecmascript es +application/EDI-Consent +application/EDI-X12 +application/EDIFACT +application/efi efi +application/EmergencyCallData.Comment+xml +application/EmergencyCallData.Control+xml +application/EmergencyCallData.DeviceInfo+xml +application/EmergencyCallData.eCall.MSD +application/EmergencyCallData.ProviderInfo+xml +application/EmergencyCallData.ServiceInfo+xml +application/EmergencyCallData.SubscriberInfo+xml +application/EmergencyCallData.VEDS+xml +application/emma+xml emma +application/emotionml+xml emotionml +application/encaprtp +application/epp+xml +application/epub+zip epub +application/eshop +application/exi exi +application/fastinfoset finf +application/fastsoap +application/fdt+xml fdt +# fits, fit, fts: image/fits +application/fits +# application/font-sfnt deprecated in favor of font/sfnt +application/font-tdpfr pfr +# application/font-woff deprecated in favor of font/woff +application/framework-attributes+xml +application/geo+json geojson +application/geo+json-seq +application/gml+xml gml +application/gzip gz tgz +application/H224 +application/held+xml +application/http +application/hyperstudio stk +application/ibe-key-request+xml +application/ibe-pkg-reply+xml +application/ibe-pp-data +application/iges 
+application/im-iscomposing+xml +application/index +application/index.cmd +application/index.obj +application/index.response +application/index.vnd +application/inkml+xml ink inkml +application/iotp +application/ipfix ipfix +application/ipp +application/isup +application/its+xml its +application/javascript js +application/jose +application/jose+json +application/jrd+json jrd +application/json json +application/json-patch+json json-patch +application/json-seq +application/jwk+json +application/jwk-set+json +application/jwt +application/kpml-request+xml +application/kpml-response+xml +application/ld+json jsonld +application/lgr+xml lgr +application/link-format wlnk +application/load-control+xml +application/lost+xml lostxml +application/lostsync+xml lostsyncxml +application/LXF lxf +application/mac-binhex40 hqx +application/macwriteii +application/mads+xml mads +application/marc mrc +application/marcxml+xml mrcx +application/mathematica nb ma mb +application/mathml-content+xml +application/mathml-presentation+xml +application/mathml+xml mml +application/mbms-associated-procedure-description+xml +application/mbms-deregister+xml +application/mbms-envelope+xml +application/mbms-msk-response+xml +application/mbms-msk+xml +application/mbms-protection-description+xml +application/mbms-reception-report+xml +application/mbms-register-response+xml +application/mbms-register+xml +application/mbms-schedule+xml +application/mbms-user-service-description+xml +application/mbox mbox +application/media_control+xml +# mpf: text/vnd.ms-mediapackage +application/media-policy-dataset+xml +application/mediaservercontrol+xml +application/merge-patch+json +application/metalink4+xml meta4 +application/mets+xml mets +application/MF4 mf4 +application/mikey +application/mods+xml mods +application/moss-keys +application/moss-signature +application/mosskey-data +application/mosskey-request +application/mp21 m21 mp21 +# mp4, mpg4: video/mp4, see RFC 4337 +application/mp4 +application/mpeg4-generic +application/mpeg4-iod +application/mpeg4-iod-xmt +# xdf: application/xcap-diff+xml +application/mrb-consumer+xml +application/mrb-publish+xml +application/msc-ivr+xml +application/msc-mixer+xml +application/msword doc +application/mud+json +application/mxf mxf +application/n-quads nq +application/n-triples nt +application/nasdata +application/news-checkgroups +application/news-groupinfo +application/news-transmission +application/nlsml+xml +application/nss +application/ocsp-request orq +application/ocsp-response ors +application/octet-stream bin lha lzh exe class so dll img iso +application/oda oda +application/ODX odx +application/oebps-package+xml opf +application/ogg ogx +application/oxps oxps +application/p2p-overlay+xml relo +application/parityfec +# xer: application/xcap-error+xml +application/patch-ops-error+xml +application/pdf pdf +application/PDX pdx +application/pgp-encrypted pgp +application/pgp-keys +application/pgp-signature sig +application/pidf-diff+xml +application/pidf+xml +application/pkcs10 p10 +application/pkcs12 p12 pfx +application/pkcs7-mime p7m p7c +application/pkcs7-signature p7s +application/pkcs8 p8 +# ac: application/vnd.nokia.n-gage.ac+xml +application/pkix-attr-cert +application/pkix-cert cer +application/pkix-crl crl +application/pkix-pkipath pkipath +application/pkixcmp pki +application/pls+xml pls +application/poc-settings+xml +application/postscript ps eps ai +application/ppsp-tracker+json +application/problem+json +application/problem+xml +application/provenance+xml provx 
+application/prs.alvestrand.titrax-sheet +application/prs.cww cw cww +application/prs.hpub+zip hpub +application/prs.nprend rnd rct +application/prs.plucker +application/prs.rdf-xml-crypt rdf-crypt +application/prs.xsf+xml xsf +application/pskc+xml pskcxml +application/qsig +application/raptorfec +application/rdap+json +application/rdf+xml rdf +application/reginfo+xml rif +application/relax-ng-compact-syntax rnc +application/remote-printing +application/reputon+json +application/resource-lists-diff+xml rld +application/resource-lists+xml rl +application/rfc+xml rfcxml +application/riscos +application/rlmi+xml +application/rls-services+xml rs +application/rpki-ghostbusters gbr +application/rpki-manifest mft +application/rpki-publication +application/rpki-roa roa +application/rpki-updown +application/rtf rtf +application/rtploopback +application/rtx +application/samlassertion+xml +application/samlmetadata+xml +application/sbml+xml +application/scaip+xml +# scm: application/vnd.lotus-screencam +application/scim+json scim +application/scvp-cv-request scq +application/scvp-cv-response scs +application/scvp-vp-request spq +application/scvp-vp-response spp +application/sdp sdp +application/sep+xml +application/sep-exi +application/session-info +application/set-payment +application/set-payment-initiation +application/set-registration +application/set-registration-initiation +application/sgml +application/sgml-open-catalog soc +application/shf+xml shf +application/sieve siv sieve +application/simple-filter+xml cl +application/simple-message-summary +application/simpleSymbolContainer +application/slate +# application/smil obsoleted by application/smil+xml +application/smil+xml smil smi sml +application/smpte336m +application/soap+fastinfoset +application/soap+xml +application/sparql-query rq +application/sparql-results+xml srx +application/spirits-event+xml +application/sql sql +application/srgs gram +application/srgs+xml grxml +application/sru+xml sru +application/ssml+xml ssml +application/tamp-apex-update tau +application/tamp-apex-update-confirm auc +application/tamp-community-update tcu +application/tamp-community-update-confirm cuc +application/tamp-error ter +application/tamp-sequence-adjust tsa +application/tamp-sequence-adjust-confirm sac +# tsq: application/timestamp-query +application/tamp-status-query +# tsr: application/timestamp-reply +application/tamp-status-response +application/tamp-update tur +application/tamp-update-confirm tuc +application/tei+xml tei teiCorpus odd +application/thraud+xml tfi +application/timestamp-query tsq +application/timestamp-reply tsr +application/timestamped-data tsd +application/trig trig +application/ttml+xml ttml +application/tve-trigger +application/ulpfec +application/urc-grpsheet+xml gsheet +application/urc-ressheet+xml rsheet +application/urc-targetdesc+xml td +application/urc-uisocketdesc+xml uis +application/vcard+json +application/vcard+xml +application/vemmi +application/vnd.3gpp.access-transfer-events+xml +application/vnd.3gpp.bsf+xml +application/vnd.3gpp.mid-call+xml +application/vnd.3gpp.pic-bw-large plb +application/vnd.3gpp.pic-bw-small psb +application/vnd.3gpp.pic-bw-var pvb +application/vnd.3gpp-prose+xml +application/vnd.3gpp-prose-pc3ch+xml +# sms: application/vnd.3gpp2.sms +application/vnd.3gpp.sms +application/vnd.3gpp.sms+xml +application/vnd.3gpp.srvcc-ext+xml +application/vnd.3gpp.SRVCC-info+xml +application/vnd.3gpp.state-and-event-info+xml +application/vnd.3gpp.ussd+xml +application/vnd.3gpp2.bcmcsinfo+xml 
+application/vnd.3gpp2.sms sms +application/vnd.3gpp2.tcap tcap +application/vnd.3lightssoftware.imagescal imgcal +application/vnd.3M.Post-it-Notes pwn +application/vnd.accpac.simply.aso aso +application/vnd.accpac.simply.imp imp +application/vnd.acucobol acu +application/vnd.acucorp atc acutc +application/vnd.adobe.flash.movie swf +application/vnd.adobe.formscentral.fcdt fcdt +application/vnd.adobe.fxp fxp fxpl +application/vnd.adobe.partial-upload +application/vnd.adobe.xdp+xml xdp +application/vnd.adobe.xfdf xfdf +application/vnd.aether.imp +application/vnd.ah-barcode +application/vnd.ahead.space ahead +application/vnd.airzip.filesecure.azf azf +application/vnd.airzip.filesecure.azs azs +application/vnd.amazon.mobi8-ebook azw3 +application/vnd.americandynamics.acc acc +application/vnd.amiga.ami ami +application/vnd.amundsen.maze+xml +application/vnd.anki apkg +application/vnd.anser-web-certificate-issue-initiation cii +# Not in IANA listing, but is on FTP site? +application/vnd.anser-web-funds-transfer-initiation fti +# atx: audio/ATRAC-X +application/vnd.antix.game-component +application/vnd.apache.thrift.binary +application/vnd.apache.thrift.compact +application/vnd.apache.thrift.json +application/vnd.api+json +application/vnd.apothekende.reservation+json +application/vnd.apple.installer+xml dist distz pkg mpkg +# m3u: audio/x-mpegurl for now +application/vnd.apple.mpegurl m3u8 +# application/vnd.arastra.swi obsoleted by application/vnd.aristanetworks.swi +application/vnd.aristanetworks.swi swi +application/vnd.artsquare +application/vnd.astraea-software.iota iota +application/vnd.audiograph aep +application/vnd.autopackage package +application/vnd.avistar+xml +application/vnd.balsamiq.bmml+xml bmml +application/vnd.balsamiq.bmpr bmpr +application/vnd.bekitzur-stech+json +application/vnd.bint.med-content +application/vnd.biopax.rdf+xml +application/vnd.blueice.multipass mpm +application/vnd.bluetooth.ep.oob ep +application/vnd.bluetooth.le.oob le +application/vnd.bmi bmi +application/vnd.businessobjects rep +application/vnd.cab-jscript +application/vnd.canon-cpdl +application/vnd.canon-lips +application/vnd.capasystems-pg+json +application/vnd.cendio.thinlinc.clientconf tlclient +application/vnd.century-systems.tcp_stream +application/vnd.chemdraw+xml cdxml +application/vnd.chess-pgn pgn +application/vnd.chipnuts.karaoke-mmd mmd +application/vnd.cinderella cdy +application/vnd.cirpack.isdn-ext +application/vnd.citationstyles.style+xml csl +application/vnd.claymore cla +application/vnd.cloanto.rp9 rp9 +application/vnd.clonk.c4group c4g c4d c4f c4p c4u +application/vnd.cluetrust.cartomobile-config c11amc +application/vnd.cluetrust.cartomobile-config-pkg c11amz +application/vnd.coffeescript coffee +application/vnd.collection+json +application/vnd.collection.doc+json +application/vnd.collection.next+json +application/vnd.comicbook+zip cbz +# icc: application/vnd.iccprofile +application/vnd.commerce-battelle ica icf icd ic0 ic1 ic2 ic3 ic4 ic5 ic6 ic7 ic8 +application/vnd.commonspace csp cst +application/vnd.contact.cmsg cdbcmsg +application/vnd.coreos.ignition+json ign ignition +application/vnd.cosmocaller cmc +application/vnd.crick.clicker clkx +application/vnd.crick.clicker.keyboard clkk +application/vnd.crick.clicker.palette clkp +application/vnd.crick.clicker.template clkt +application/vnd.crick.clicker.wordbank clkw +application/vnd.criticaltools.wbs+xml wbs +application/vnd.ctc-posml pml +application/vnd.ctct.ws+xml +application/vnd.cups-pdf +application/vnd.cups-postscript 
+application/vnd.cups-ppd ppd +application/vnd.cups-raster +application/vnd.cups-raw +application/vnd.curl curl +application/vnd.cyan.dean.root+xml +application/vnd.cybank +application/vnd.d2l.coursepackage1p0+zip +application/vnd.dart dart +application/vnd.data-vision.rdz rdz +application/vnd.datapackage+json +application/vnd.dataresource+json +application/vnd.debian.binary-package deb udeb +application/vnd.dece.data uvf uvvf uvd uvvd +application/vnd.dece.ttml+xml uvt uvvt +application/vnd.dece.unspecified uvx uvvx +application/vnd.dece.zip uvz uvvz +application/vnd.denovo.fcselayout-link fe_launch +application/vnd.desmume.movie dsm +application/vnd.dir-bi.plate-dl-nosuffix +application/vnd.dm.delegation+xml +application/vnd.dna dna +application/vnd.document+json docjson +application/vnd.dolby.mobile.1 +application/vnd.dolby.mobile.2 +application/vnd.doremir.scorecloud-binary-document scld +application/vnd.dpgraph dpg mwc dpgraph +application/vnd.dreamfactory dfac +application/vnd.drive+json +application/vnd.dtg.local +application/vnd.dtg.local.flash fla +application/vnd.dtg.local.html +application/vnd.dvb.ait ait +# class: application/octet-stream +application/vnd.dvb.dvbj +application/vnd.dvb.esgcontainer +application/vnd.dvb.ipdcdftnotifaccess +application/vnd.dvb.ipdcesgaccess +application/vnd.dvb.ipdcesgaccess2 +application/vnd.dvb.ipdcesgpdd +application/vnd.dvb.ipdcroaming +application/vnd.dvb.iptv.alfec-base +application/vnd.dvb.iptv.alfec-enhancement +application/vnd.dvb.notif-aggregate-root+xml +application/vnd.dvb.notif-container+xml +application/vnd.dvb.notif-generic+xml +application/vnd.dvb.notif-ia-msglist+xml +application/vnd.dvb.notif-ia-registration-request+xml +application/vnd.dvb.notif-ia-registration-response+xml +application/vnd.dvb.notif-init+xml +# pfr: application/font-tdpfr +application/vnd.dvb.pfr +application/vnd.dvb.service svc +# dxr: application/x-director +application/vnd.dxr +application/vnd.dynageo geo +application/vnd.dzr dzr +application/vnd.easykaraoke.cdgdownload +application/vnd.ecdis-update +application/vnd.ecowin.chart mag +application/vnd.ecowin.filerequest +application/vnd.ecowin.fileupdate +application/vnd.ecowin.series +application/vnd.ecowin.seriesrequest +application/vnd.ecowin.seriesupdate +# img: application/octet-stream +application/vnd.efi-img +# iso: application/octet-stream +application/vnd.efi-iso +application/vnd.enliven nml +application/vnd.enphase.envoy +application/vnd.eprints.data+xml +application/vnd.epson.esf esf +application/vnd.epson.msf msf +application/vnd.epson.quickanime qam +application/vnd.epson.salt slt +application/vnd.epson.ssf ssf +application/vnd.ericsson.quickcall qcall qca +application/vnd.espass-espass+zip espass +application/vnd.eszigno3+xml es3 et3 +application/vnd.etsi.aoc+xml +application/vnd.etsi.asic-e+zip asice sce +# scs: application/scvp-cv-response +application/vnd.etsi.asic-s+zip asics +application/vnd.etsi.cug+xml +application/vnd.etsi.iptvcommand+xml +application/vnd.etsi.iptvdiscovery+xml +application/vnd.etsi.iptvprofile+xml +application/vnd.etsi.iptvsad-bc+xml +application/vnd.etsi.iptvsad-cod+xml +application/vnd.etsi.iptvsad-npvr+xml +application/vnd.etsi.iptvservice+xml +application/vnd.etsi.iptvsync+xml +application/vnd.etsi.iptvueprofile+xml +application/vnd.etsi.mcid+xml +application/vnd.etsi.mheg5 +application/vnd.etsi.overload-control-policy-dataset+xml +application/vnd.etsi.pstn+xml +application/vnd.etsi.sci+xml +application/vnd.etsi.simservs+xml +application/vnd.etsi.timestamp-token tst 
+application/vnd.etsi.tsl.der +application/vnd.etsi.tsl+xml +application/vnd.eudora.data +application/vnd.ezpix-album ez2 +application/vnd.ezpix-package ez3 +application/vnd.f-secure.mobile +application/vnd.fastcopy-disk-image dim +application/vnd.fdf fdf +application/vnd.fdsn.mseed msd mseed +application/vnd.fdsn.seed seed dataless +application/vnd.ffsns +application/vnd.filmit.zfc zfc +# all extensions: application/vnd.hbci +application/vnd.fints +application/vnd.firemonkeys.cloudcell +application/vnd.FloGraphIt gph +application/vnd.fluxtime.clip ftc +application/vnd.font-fontforge-sfd sfd +application/vnd.framemaker fm +application/vnd.frogans.fnc fnc +application/vnd.frogans.ltf ltf +application/vnd.fsc.weblaunch fsc +application/vnd.fujitsu.oasys oas +application/vnd.fujitsu.oasys2 oa2 +application/vnd.fujitsu.oasys3 oa3 +application/vnd.fujitsu.oasysgp fg5 +application/vnd.fujitsu.oasysprs bh2 +application/vnd.fujixerox.ART-EX +application/vnd.fujixerox.ART4 +application/vnd.fujixerox.ddd ddd +application/vnd.fujixerox.docuworks xdw +application/vnd.fujixerox.docuworks.binder xbd +application/vnd.fujixerox.docuworks.container xct +application/vnd.fujixerox.HBPL +application/vnd.fut-misnet +application/vnd.fuzzysheet fzs +application/vnd.genomatix.tuxedo txd +# application/vnd.geo+json obsoleted by application/geo+json +application/vnd.geocube+xml g3 g³ +application/vnd.geogebra.file ggb +application/vnd.geogebra.tool ggt +application/vnd.geometry-explorer gex gre +application/vnd.geonext gxt +application/vnd.geoplan g2w +application/vnd.geospace g3w +# gbr: application/rpki-ghostbusters +application/vnd.gerber +application/vnd.globalplatform.card-content-mgt +application/vnd.globalplatform.card-content-mgt-response +application/vnd.gmx gmx +application/vnd.google-earth.kml+xml kml +application/vnd.google-earth.kmz kmz +application/vnd.gov.sk.e-form+xml +application/vnd.gov.sk.e-form+zip +application/vnd.gov.sk.xmldatacontainer+xml +application/vnd.grafeq gqf gqs +application/vnd.gridmp +application/vnd.groove-account gac +application/vnd.groove-help ghf +application/vnd.groove-identity-message gim +application/vnd.groove-injector grv +application/vnd.groove-tool-message gtm +application/vnd.groove-tool-template tpl +application/vnd.groove-vcard vcg +application/vnd.hal+json +application/vnd.hal+xml hal +application/vnd.HandHeld-Entertainment+xml zmm +application/vnd.hbci hbci hbc kom upa pkd bpd +application/vnd.hc+json +# rep: application/vnd.businessobjects +application/vnd.hcl-bireports +application/vnd.hdt hdt +application/vnd.heroku+json +application/vnd.hhe.lesson-player les +application/vnd.hp-HPGL hpgl +application/vnd.hp-hpid hpi hpid +application/vnd.hp-hps hps +application/vnd.hp-jlyt jlt +application/vnd.hp-PCL pcl +application/vnd.hp-PCLXL +application/vnd.httphone +application/vnd.hydrostatix.sof-data sfd-hdstx +application/vnd.hyperdrive+json +application/vnd.hzn-3d-crossword x3d +application/vnd.ibm.afplinedata +application/vnd.ibm.electronic-media emm +application/vnd.ibm.MiniPay mpy +application/vnd.ibm.modcap list3820 listafp afp pseg3820 +application/vnd.ibm.rights-management irm +application/vnd.ibm.secure-container sc +application/vnd.iccprofile icc icm +application/vnd.ieee.1905 1905.1 +application/vnd.igloader igl +application/vnd.imagemeter.folder+zip imf +application/vnd.imagemeter.image+zip imi +application/vnd.immervision-ivp ivp +application/vnd.immervision-ivu ivu +application/vnd.ims.imsccv1p1 imscc +application/vnd.ims.imsccv1p2 
+application/vnd.ims.imsccv1p3 +application/vnd.ims.lis.v2.result+json +application/vnd.ims.lti.v2.toolconsumerprofile+json +application/vnd.ims.lti.v2.toolproxy.id+json +application/vnd.ims.lti.v2.toolproxy+json +application/vnd.ims.lti.v2.toolsettings+json +application/vnd.ims.lti.v2.toolsettings.simple+json +application/vnd.informedcontrol.rms+xml +# application/vnd.informix-visionary obsoleted by application/vnd.visionary +application/vnd.infotech.project +application/vnd.infotech.project+xml +application/vnd.innopath.wamp.notification +application/vnd.insors.igm igm +application/vnd.intercon.formnet xpw xpx +application/vnd.intergeo i2g +application/vnd.intertrust.digibox +application/vnd.intertrust.nncp +application/vnd.intu.qbo qbo +application/vnd.intu.qfx qfx +application/vnd.iptc.g2.catalogitem+xml +application/vnd.iptc.g2.conceptitem+xml +application/vnd.iptc.g2.knowledgeitem+xml +application/vnd.iptc.g2.newsitem+xml +application/vnd.iptc.g2.newsmessage+xml +application/vnd.iptc.g2.packageitem+xml +application/vnd.iptc.g2.planningitem+xml +application/vnd.ipunplugged.rcprofile rcprofile +application/vnd.irepository.package+xml irp +application/vnd.is-xpr xpr +application/vnd.isac.fcs fcs +application/vnd.jam jam +application/vnd.japannet-directory-service +application/vnd.japannet-jpnstore-wakeup +application/vnd.japannet-payment-wakeup +application/vnd.japannet-registration +application/vnd.japannet-registration-wakeup +application/vnd.japannet-setstore-wakeup +application/vnd.japannet-verification +application/vnd.japannet-verification-wakeup +application/vnd.jcp.javame.midlet-rms rms +application/vnd.jisp jisp +application/vnd.joost.joda-archive joda +application/vnd.jsk.isdn-ngn +application/vnd.kahootz ktz ktr +application/vnd.kde.karbon karbon +application/vnd.kde.kchart chrt +application/vnd.kde.kformula kfo +application/vnd.kde.kivio flw +application/vnd.kde.kontour kon +application/vnd.kde.kpresenter kpr kpt +application/vnd.kde.kspread ksp +application/vnd.kde.kword kwd kwt +application/vnd.kenameaapp htke +application/vnd.kidspiration kia +application/vnd.Kinar kne knp sdf +application/vnd.koan skp skd skm skt +application/vnd.kodak-descriptor sse +application/vnd.las.las+json lasjson +application/vnd.las.las+xml lasxml +application/vnd.liberty-request+xml +application/vnd.llamagraphics.life-balance.desktop lbd +application/vnd.llamagraphics.life-balance.exchange+xml lbe +application/vnd.lotus-1-2-3 123 wk4 wk3 wk1 +application/vnd.lotus-approach apr vew +application/vnd.lotus-freelance prz pre +application/vnd.lotus-notes nsf ntf ndl ns4 ns3 ns2 nsh nsg +application/vnd.lotus-organizer or3 or2 org +application/vnd.lotus-screencam scm +application/vnd.lotus-wordpro lwp sam +application/vnd.macports.portpkg portpkg +application/vnd.mapbox-vector-tile mvt +application/vnd.marlin.drm.actiontoken+xml +application/vnd.marlin.drm.conftoken+xml +application/vnd.marlin.drm.license+xml +application/vnd.marlin.drm.mdcf mdc +application/vnd.mason+json +application/vnd.maxmind.maxmind-db mmdb +application/vnd.mcd mcd +application/vnd.medcalcdata mc1 +application/vnd.mediastation.cdkey cdkey +application/vnd.meridian-slingshot +application/vnd.MFER mwf +application/vnd.mfmp mfm +application/vnd.micro+json +application/vnd.micrografx.flo flo +application/vnd.micrografx.igx igx +application/vnd.microsoft.portable-executable +application/vnd.microsoft.windows.thumbnail-cache +application/vnd.miele+json +application/vnd.mif mif +application/vnd.minisoft-hp3000-save 
+application/vnd.mitsubishi.misty-guard.trustweb +application/vnd.Mobius.DAF daf +application/vnd.Mobius.DIS dis +application/vnd.Mobius.MBK mbk +application/vnd.Mobius.MQY mqy +application/vnd.Mobius.MSL msl +application/vnd.Mobius.PLC plc +application/vnd.Mobius.TXF txf +application/vnd.mophun.application mpn +application/vnd.mophun.certificate mpc +application/vnd.motorola.flexsuite +application/vnd.motorola.flexsuite.adsi +application/vnd.motorola.flexsuite.fis +application/vnd.motorola.flexsuite.gotap +application/vnd.motorola.flexsuite.kmr +application/vnd.motorola.flexsuite.ttc +application/vnd.motorola.flexsuite.wem +application/vnd.motorola.iprm +application/vnd.mozilla.xul+xml xul +application/vnd.ms-3mfdocument 3mf +application/vnd.ms-artgalry cil +application/vnd.ms-asf asf +application/vnd.ms-cab-compressed cab +application/vnd.ms-excel xls xlm xla xlc xlt xlw +application/vnd.ms-excel.template.macroEnabled.12 xltm +application/vnd.ms-excel.addin.macroEnabled.12 xlam +application/vnd.ms-excel.sheet.binary.macroEnabled.12 xlsb +application/vnd.ms-excel.sheet.macroEnabled.12 xlsm +application/vnd.ms-fontobject eot +application/vnd.ms-htmlhelp chm +application/vnd.ms-ims ims +application/vnd.ms-lrm lrm +application/vnd.ms-office.activeX+xml +application/vnd.ms-officetheme thmx +application/vnd.ms-playready.initiator+xml +application/vnd.ms-powerpoint ppt pps pot +application/vnd.ms-powerpoint.addin.macroEnabled.12 ppam +application/vnd.ms-powerpoint.presentation.macroEnabled.12 pptm +application/vnd.ms-powerpoint.slide.macroEnabled.12 sldm +application/vnd.ms-powerpoint.slideshow.macroEnabled.12 ppsm +application/vnd.ms-powerpoint.template.macroEnabled.12 potm +application/vnd.ms-PrintDeviceCapabilities+xml +application/vnd.ms-PrintSchemaTicket+xml +application/vnd.ms-project mpp mpt +application/vnd.ms-tnef tnef tnf +application/vnd.ms-windows.devicepairing +application/vnd.ms-windows.nwprinting.oob +application/vnd.ms-windows.printerpairing +application/vnd.ms-windows.wsd.oob +application/vnd.ms-wmdrm.lic-chlg-req +application/vnd.ms-wmdrm.lic-resp +application/vnd.ms-wmdrm.meter-chlg-req +application/vnd.ms-wmdrm.meter-resp +application/vnd.ms-word.document.macroEnabled.12 docm +application/vnd.ms-word.template.macroEnabled.12 dotm +application/vnd.ms-works wcm wdb wks wps +application/vnd.ms-wpl wpl +application/vnd.ms-xpsdocument xps +application/vnd.msa-disk-image msa +application/vnd.mseq mseq +application/vnd.msign +application/vnd.multiad.creator crtr +application/vnd.multiad.creator.cif cif +application/vnd.music-niff +application/vnd.musician mus +application/vnd.muvee.style msty +application/vnd.mynfc taglet +application/vnd.ncd.control +application/vnd.ncd.reference +application/vnd.nearst.inv+json +application/vnd.nervana entity request bkm kcm +application/vnd.netfpx +# ntf: application/vnd.lotus-notes +application/vnd.nitf nitf +application/vnd.neurolanguage.nlu nlu +application/vnd.nintendo.nitro.rom nds +application/vnd.nintendo.snes.rom sfc smc +application/vnd.noblenet-directory nnd +application/vnd.noblenet-sealer nns +application/vnd.noblenet-web nnw +application/vnd.nokia.catalogs +application/vnd.nokia.conml+wbxml +application/vnd.nokia.conml+xml +application/vnd.nokia.iptv.config+xml +application/vnd.nokia.iSDS-radio-presets +application/vnd.nokia.landmark+wbxml +application/vnd.nokia.landmark+xml +application/vnd.nokia.landmarkcollection+xml +application/vnd.nokia.n-gage.ac+xml ac +application/vnd.nokia.n-gage.data ngdat 
+application/vnd.nokia.n-gage.symbian.install n-gage +application/vnd.nokia.ncd +application/vnd.nokia.pcd+wbxml +application/vnd.nokia.pcd+xml +application/vnd.nokia.radio-preset rpst +application/vnd.nokia.radio-presets rpss +application/vnd.novadigm.EDM edm +application/vnd.novadigm.EDX edx +application/vnd.novadigm.EXT ext +application/vnd.ntt-local.content-share +application/vnd.ntt-local.file-transfer +application/vnd.ntt-local.ogw_remote-access +application/vnd.ntt-local.sip-ta_remote +application/vnd.ntt-local.sip-ta_tcp_stream +application/vnd.oasis.opendocument.chart odc +application/vnd.oasis.opendocument.chart-template otc +application/vnd.oasis.opendocument.database odb +application/vnd.oasis.opendocument.formula odf +# otf: font/otf +application/vnd.oasis.opendocument.formula-template +application/vnd.oasis.opendocument.graphics odg +application/vnd.oasis.opendocument.graphics-template otg +application/vnd.oasis.opendocument.image odi +application/vnd.oasis.opendocument.image-template oti +application/vnd.oasis.opendocument.presentation odp +application/vnd.oasis.opendocument.presentation-template otp +application/vnd.oasis.opendocument.spreadsheet ods +application/vnd.oasis.opendocument.spreadsheet-template ots +application/vnd.oasis.opendocument.text odt +application/vnd.oasis.opendocument.text-master odm +application/vnd.oasis.opendocument.text-template ott +application/vnd.oasis.opendocument.text-web oth +application/vnd.obn +application/vnd.ocf+cbor +application/vnd.oftn.l10n+json +application/vnd.oipf.contentaccessdownload+xml +application/vnd.oipf.contentaccessstreaming+xml +application/vnd.oipf.cspg-hexbinary +application/vnd.oipf.dae.svg+xml +application/vnd.oipf.dae.xhtml+xml +application/vnd.oipf.mippvcontrolmessage+xml +application/vnd.oipf.pae.gem +application/vnd.oipf.spdiscovery+xml +application/vnd.oipf.spdlist+xml +application/vnd.oipf.ueprofile+xml +application/vnd.olpc-sugar xo +application/vnd.oma.bcast.associated-procedure-parameter+xml +application/vnd.oma.bcast.drm-trigger+xml +application/vnd.oma.bcast.imd+xml +application/vnd.oma.bcast.ltkm +application/vnd.oma.bcast.notification+xml +application/vnd.oma.bcast.provisioningtrigger +application/vnd.oma.bcast.sgboot +application/vnd.oma.bcast.sgdd+xml +application/vnd.oma.bcast.sgdu +application/vnd.oma.bcast.simple-symbol-container +application/vnd.oma.bcast.smartcard-trigger+xml +application/vnd.oma.bcast.sprov+xml +application/vnd.oma.bcast.stkm +application/vnd.oma.cab-address-book+xml +application/vnd.oma.cab-feature-handler+xml +application/vnd.oma.cab-pcc+xml +application/vnd.oma.cab-subs-invite+xml +application/vnd.oma.cab-user-prefs+xml +application/vnd.oma.dcd +application/vnd.oma.dcdc +application/vnd.oma.dd2+xml dd2 +application/vnd.oma.drm.risd+xml +application/vnd.oma.group-usage-list+xml +application/vnd.oma.lwm2m+json +application/vnd.oma.lwm2m+tlv +application/vnd.oma.pal+xml +application/vnd.oma.poc.detailed-progress-report+xml +application/vnd.oma.poc.final-report+xml +application/vnd.oma.poc.groups+xml +application/vnd.oma.poc.invocation-descriptor+xml +application/vnd.oma.poc.optimized-progress-report+xml +application/vnd.oma.push +application/vnd.oma.scidm.messages+xml +application/vnd.oma.xcap-directory+xml +application/vnd.oma-scws-config +application/vnd.oma-scws-http-request +application/vnd.oma-scws-http-response +application/vnd.omads-email+xml +application/vnd.omads-file+xml +application/vnd.omads-folder+xml +application/vnd.omaloc-supl-init +application/vnd.onepager tam 
+application/vnd.onepagertamp tamp +application/vnd.onepagertamx tamx +application/vnd.onepagertat tat +application/vnd.onepagertatp tatp +application/vnd.onepagertatx tatx +application/vnd.openblox.game+xml obgx +application/vnd.openblox.game-binary obg +application/vnd.openeye.oeb oeb +application/vnd.openofficeorg.extension oxt +application/vnd.openstreetmap.data+xml osm +application/vnd.openxmlformats-officedocument.custom-properties+xml +application/vnd.openxmlformats-officedocument.customXmlProperties+xml +application/vnd.openxmlformats-officedocument.drawing+xml +application/vnd.openxmlformats-officedocument.drawingml.chart+xml +application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml +application/vnd.openxmlformats-officedocument.extended-properties+xml +application/vnd.openxmlformats-officedocument.presentationml.commentAuthors+xml +application/vnd.openxmlformats-officedocument.presentationml.comments+xml +application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml +application/vnd.openxmlformats-officedocument.presentationml.presProps+xml +application/vnd.openxmlformats-officedocument.presentationml.presentation pptx +application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml +application/vnd.openxmlformats-officedocument.presentationml.slide sldx +application/vnd.openxmlformats-officedocument.presentationml.slide+xml +application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml +application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.slideUpdateInfo+xml +application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx +application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml +application/vnd.openxmlformats-officedocument.presentationml.tableStyles+xml +application/vnd.openxmlformats-officedocument.presentationml.tags+xml +application/vnd.openxmlformats-officedocument.presentationml.template potx +application/vnd.openxmlformats-officedocument.presentationml.template.main+xml +application/vnd.openxmlformats-officedocument.presentationml.viewProps+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheDefinition+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheRecords+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.revisionHeaders+xml 
+application/vnd.openxmlformats-officedocument.spreadsheetml.revisionLog+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheetMetadata+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.tableSingleCells+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx +application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.userNames+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.volatileDependencies+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml +application/vnd.openxmlformats-officedocument.theme+xml +application/vnd.openxmlformats-officedocument.themeOverride+xml +application/vnd.openxmlformats-officedocument.vmlDrawing +application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document docx +application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx +application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml +application/vnd.openxmlformats-package.core-properties+xml +application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml +application/vnd.openxmlformats-package.relationships+xml +application/vnd.oracle.resource+json +application/vnd.orange.indata +application/vnd.osa.netdeploy ndc +application/vnd.osgeo.mapguide.package mgp +# jar: application/x-java-archive +application/vnd.osgi.bundle +application/vnd.osgi.dp dp +application/vnd.osgi.subsystem esa +application/vnd.otps.ct-kip+xml +application/vnd.oxli.countgraph oxlicg +application/vnd.pagerduty+json +application/vnd.palm prc pdb pqa oprc +application/vnd.panoply plp +application/vnd.paos+xml +application/vnd.pawaafile paw +application/vnd.pcos +application/vnd.pg.format str +application/vnd.pg.osasli ei6 +application/vnd.piaccess.application-license pil +application/vnd.picsel efif +application/vnd.pmi.widget wg +application/vnd.poc.group-advertisement+xml +application/vnd.pocketlearn plf +application/vnd.powerbuilder6 pbd +application/vnd.powerbuilder6-s +application/vnd.powerbuilder7 +application/vnd.powerbuilder7-s +application/vnd.powerbuilder75 +application/vnd.powerbuilder75-s +application/vnd.preminet preminet +application/vnd.previewsystems.box box vbox 
+application/vnd.proteus.magazine mgz +application/vnd.publishare-delta-tree qps +# pti: image/prs.pti +application/vnd.pvi.ptid1 ptid +application/vnd.pwg-multiplexed +application/vnd.pwg-xhtml-print+xml +application/vnd.qualcomm.brew-app-res bar +application/vnd.quarantainenet +application/vnd.Quark.QuarkXPress qxd qxt qwd qwt qxl qxb +application/vnd.quobject-quoxdocument quox quiz +application/vnd.radisys.moml+xml +application/vnd.radisys.msml-audit-conf+xml +application/vnd.radisys.msml-audit-conn+xml +application/vnd.radisys.msml-audit-dialog+xml +application/vnd.radisys.msml-audit-stream+xml +application/vnd.radisys.msml-audit+xml +application/vnd.radisys.msml-conf+xml +application/vnd.radisys.msml-dialog-base+xml +application/vnd.radisys.msml-dialog-fax-detect+xml +application/vnd.radisys.msml-dialog-fax-sendrecv+xml +application/vnd.radisys.msml-dialog-group+xml +application/vnd.radisys.msml-dialog-speech+xml +application/vnd.radisys.msml-dialog-transform+xml +application/vnd.radisys.msml-dialog+xml +application/vnd.radisys.msml+xml +application/vnd.rainstor.data tree +application/vnd.rapid +application/vnd.rar rar +application/vnd.realvnc.bed bed +application/vnd.recordare.musicxml mxl +application/vnd.recordare.musicxml+xml +application/vnd.RenLearn.rlprint +application/vnd.rig.cryptonote cryptonote +application/vnd.route66.link66+xml link66 +# gbr: application/rpki-ghostbusters +application/vnd.rs-274x +application/vnd.ruckus.download +application/vnd.s3sms +application/vnd.sailingtracker.track st +application/vnd.sbm.cid +application/vnd.sbm.mid2 +application/vnd.scribus scd sla slaz +application/vnd.sealed.3df s3df +application/vnd.sealed.csf scsf +application/vnd.sealed.doc sdoc sdo s1w +application/vnd.sealed.eml seml sem +application/vnd.sealed.mht smht smh +application/vnd.sealed.net +# spp: application/scvp-vp-response +application/vnd.sealed.ppt sppt s1p +application/vnd.sealed.tiff stif +application/vnd.sealed.xls sxls sxl s1e +# stm: audio/x-stm +application/vnd.sealedmedia.softseal.html stml s1h +application/vnd.sealedmedia.softseal.pdf spdf spd s1a +application/vnd.seemail see +application/vnd.sema sema +application/vnd.semd semd +application/vnd.semf semf +application/vnd.shana.informed.formdata ifm +application/vnd.shana.informed.formtemplate itp +application/vnd.shana.informed.interchange iif +application/vnd.shana.informed.package ipk +application/vnd.SimTech-MindMapper twd twds +application/vnd.siren+json +application/vnd.smaf mmf +application/vnd.smart.notebook notebook +application/vnd.smart.teacher teacher +application/vnd.software602.filler.form+xml fo +application/vnd.software602.filler.form-xml-zip zfo +application/vnd.solent.sdkm+xml sdkm sdkd +application/vnd.spotfire.dxp dxp +application/vnd.spotfire.sfs sfs +application/vnd.sss-cod +application/vnd.sss-dtf +application/vnd.sss-ntf +application/vnd.stepmania.package smzip +application/vnd.stepmania.stepchart sm +application/vnd.street-stream +application/vnd.sun.wadl+xml wadl +application/vnd.sus-calendar sus susp +application/vnd.svd +application/vnd.swiftview-ics +application/vnd.syncml+xml xsm +application/vnd.syncml.dm+wbxml bdm +application/vnd.syncml.dm+xml xdm +application/vnd.syncml.dm.notification +application/vnd.syncml.dmddf+wbxml +application/vnd.syncml.dmddf+xml ddf +application/vnd.syncml.dmtnds+wbxml +application/vnd.syncml.dmtnds+xml +application/vnd.syncml.ds.notification +application/vnd.tableschema+json +application/vnd.tao.intent-module-archive tao +application/vnd.tcpdump.pcap pcap 
cap dmp +application/vnd.theqvd qvd +application/vnd.tmd.mediaflex.api+xml +application/vnd.tml vfr viaframe +application/vnd.tmobile-livetv tmo +application/vnd.tri.onesource +application/vnd.trid.tpt tpt +application/vnd.triscape.mxs mxs +application/vnd.trueapp tra +application/vnd.truedoc +# cab: application/vnd.ms-cab-compressed +application/vnd.ubisoft.webplayer +application/vnd.ufdl ufdl ufd frm +application/vnd.uiq.theme utz +application/vnd.umajin umj +application/vnd.unity unityweb +application/vnd.uoml+xml uoml uo +application/vnd.uplanet.alert +application/vnd.uplanet.alert-wbxml +application/vnd.uplanet.bearer-choice +application/vnd.uplanet.bearer-choice-wbxml +application/vnd.uplanet.cacheop +application/vnd.uplanet.cacheop-wbxml +application/vnd.uplanet.channel +application/vnd.uplanet.channel-wbxml +application/vnd.uplanet.list +application/vnd.uplanet.list-wbxml +application/vnd.uplanet.listcmd +application/vnd.uplanet.listcmd-wbxml +application/vnd.uplanet.signal +application/vnd.uri-map urim urimap +application/vnd.valve.source.material vmt +application/vnd.vcx vcx +# sxi: application/vnd.sun.xml.impress +application/vnd.vd-study mxi study-inter model-inter +# mcd: application/vnd.mcd +application/vnd.vectorworks vwx +application/vnd.vel+json +application/vnd.verimatrix.vcas +application/vnd.vidsoft.vidconference vsc +application/vnd.visio vsd vst vsw vss +application/vnd.visionary vis +# vsc: application/vnd.vidsoft.vidconference +application/vnd.vividence.scriptfile +application/vnd.vsf vsf +application/vnd.wap.sic sic +application/vnd.wap.slc slc +application/vnd.wap.wbxml wbxml +application/vnd.wap.wmlc wmlc +application/vnd.wap.wmlscriptc wmlsc +application/vnd.webturbo wtb +application/vnd.wfa.p2p p2p +application/vnd.wfa.wsc wsc +application/vnd.windows.devicepairing +application/vnd.wmc wmc +application/vnd.wmf.bootstrap +# nb: application/mathematica for now +application/vnd.wolfram.mathematica +application/vnd.wolfram.mathematica.package m +application/vnd.wolfram.player nbp +application/vnd.wordperfect wpd +application/vnd.wqd wqd +application/vnd.wrq-hp3000-labelled +application/vnd.wt.stf stf +application/vnd.wv.csp+xml +application/vnd.wv.csp+wbxml wv +application/vnd.wv.ssp+xml +application/vnd.xacml+json +application/vnd.xara xar +application/vnd.xfdl xfdl xfd +application/vnd.xfdl.webform +application/vnd.xmi+xml +application/vnd.xmpie.cpkg cpkg +application/vnd.xmpie.dpkg dpkg +# dpkg: application/vnd.xmpie.dpkg +application/vnd.xmpie.plan +application/vnd.xmpie.ppkg ppkg +application/vnd.xmpie.xlim xlim +application/vnd.yamaha.hv-dic hvd +application/vnd.yamaha.hv-script hvs +application/vnd.yamaha.hv-voice hvp +application/vnd.yamaha.openscoreformat osf +application/vnd.yamaha.openscoreformat.osfpvg+xml +application/vnd.yamaha.remote-setup +application/vnd.yamaha.smaf-audio saf +application/vnd.yamaha.smaf-phrase spf +application/vnd.yamaha.through-ngn +application/vnd.yamaha.tunnel-udpencap +application/vnd.yaoweme yme +application/vnd.yellowriver-custom-menu cmp +application/vnd.zul zir zirz +application/vnd.zzazz.deck+xml zaz +application/voicexml+xml vxml +application/vq-rtcp-xr +application/watcherinfo+xml wif +application/whoispp-query +application/whoispp-response +application/widget wgt +application/wita +application/wordperfect5.1 +application/wsdl+xml wsdl +application/wspolicy+xml wspolicy +# yes, this *is* IANA registered despite of x- +application/x-www-form-urlencoded +application/x400-bp +application/xacml+xml +application/xcap-att+xml 
xav +application/xcap-caps+xml xca +application/xcap-diff+xml xdf +application/xcap-el+xml xel +application/xcap-error+xml xer +application/xcap-ns+xml xns +application/xcon-conference-info-diff+xml +application/xcon-conference-info+xml +application/xenc+xml +application/xhtml+xml xhtml xhtm xht +# xml, xsd, rng: text/xml +application/xml +# mod: audio/x-mod +application/xml-dtd dtd +# ent: text/xml-external-parsed-entity +application/xml-external-parsed-entity +application/xml-patch+xml +application/xmpp+xml +application/xop+xml xop +application/xslt+xml xsl xslt +application/xv+xml mxml xhvml xvml xvm +application/yang yang +application/yang-data+json +application/yang-data+xml +application/yang-patch+json +application/yang-patch+xml +application/yin+xml yin +application/zip zip +application/zlib +audio/1d-interleaved-parityfec +audio/32kadpcm 726 +# 3gp, 3gpp: video/3gpp +audio/3gpp +# 3g2, 3gpp2: video/3gpp2 +audio/3gpp2 +audio/ac3 ac3 +audio/AMR amr +audio/AMR-WB awb +audio/amr-wb+ +audio/aptx +audio/asc acn +# aa3, omg: audio/ATRAC3 +audio/ATRAC-ADVANCED-LOSSLESS aal +# aa3, omg: audio/ATRAC3 +audio/ATRAC-X atx +audio/ATRAC3 at3 aa3 omg +audio/basic au snd +audio/BV16 +audio/BV32 +audio/clearmode +audio/CN +audio/DAT12 +audio/dls dls +audio/dsr-es201108 +audio/dsr-es202050 +audio/dsr-es202211 +audio/dsr-es202212 +audio/DV +audio/DVI4 +audio/eac3 +audio/encaprtp +audio/EVRC evc +# qcp: audio/qcelp +audio/EVRC-QCP +audio/EVRC0 +audio/EVRC1 +audio/EVRCB evb +audio/EVRCB0 +audio/EVRCB1 +audio/EVRCNW enw +audio/EVRCNW0 +audio/EVRCNW1 +audio/EVRCWB evw +audio/EVRCWB0 +audio/EVRCWB1 +audio/EVS +audio/example +audio/fwdred +audio/G711-0 +audio/G719 +audio/G722 +audio/G7221 +audio/G723 +audio/G726-16 +audio/G726-24 +audio/G726-32 +audio/G726-40 +audio/G728 +audio/G729 +audio/G7291 +audio/G729D +audio/G729E +audio/GSM +audio/GSM-EFR +audio/GSM-HR-08 +audio/iLBC lbc +audio/ip-mr_v2.5 +# wav: audio/x-wav +audio/L16 l16 +audio/L20 +audio/L24 +audio/L8 +audio/LPC +audio/MELP +audio/MELP600 +audio/MELP1200 +audio/MELP2400 +audio/mobile-xmf mxmf +# mp4, mpg4: video/mp4, see RFC 4337 +audio/mp4 m4a +audio/MP4A-LATM +audio/MPA +audio/mpa-robust +audio/mpeg mp3 mpga mp1 mp2 +audio/mpeg4-generic +audio/ogg oga ogg opus spx +audio/opus +audio/parityfec +audio/PCMA +audio/PCMA-WB +audio/PCMU +audio/PCMU-WB +audio/prs.sid sid psid +audio/qcelp qcp +audio/raptorfec +audio/RED +audio/rtp-enc-aescm128 +audio/rtp-midi +audio/rtploopback +audio/rtx +audio/SMV smv +# qcp: audio/qcelp, see RFC 3625 +audio/SMV-QCP +audio/SMV0 +# mid: audio/midi +audio/sp-midi +audio/speex +audio/t140c +audio/t38 +audio/telephone-event +audio/tone +audio/UEMCLIP +audio/ulpfec +audio/VDVI +audio/VMR-WB +audio/vnd.3gpp.iufp +audio/vnd.4SB +audio/vnd.audikoz koz +audio/vnd.CELP +audio/vnd.cisco.nse +audio/vnd.cmles.radio-events +audio/vnd.cns.anp1 +audio/vnd.cns.inf1 +audio/vnd.dece.audio uva uvva +audio/vnd.digital-winds eol +audio/vnd.dlna.adts +audio/vnd.dolby.heaac.1 +audio/vnd.dolby.heaac.2 +audio/vnd.dolby.mlp mlp +audio/vnd.dolby.mps +audio/vnd.dolby.pl2 +audio/vnd.dolby.pl2x +audio/vnd.dolby.pl2z +audio/vnd.dolby.pulse.1 +audio/vnd.dra +# wav: audio/x-wav, cpt: application/mac-compactpro +audio/vnd.dts dts +audio/vnd.dts.hd dtshd +# dvb: video/vnd.dvb.file +audio/vnd.dvb.file +audio/vnd.everad.plj plj +# rm: audio/x-pn-realaudio +audio/vnd.hns.audio +audio/vnd.lucent.voice lvp +audio/vnd.ms-playready.media.pya pya +# mxmf: audio/mobile-xmf +audio/vnd.nokia.mobile-xmf +audio/vnd.nortel.vbk vbk +audio/vnd.nuera.ecelp4800 
ecelp4800 +audio/vnd.nuera.ecelp7470 ecelp7470 +audio/vnd.nuera.ecelp9600 ecelp9600 +audio/vnd.octel.sbc +# audio/vnd.qcelp deprecated in favour of audio/qcelp +audio/vnd.rhetorex.32kadpcm +audio/vnd.rip rip +audio/vnd.sealedmedia.softseal.mpeg smp3 smp s1m +audio/vnd.vmx.cvsd +audio/vorbis +audio/vorbis-config +font/collection ttc +font/otf otf +font/sfnt +font/ttf ttf +font/woff woff +font/woff2 woff2 +image/bmp bmp dib +image/cgm cgm +image/dicom-rle drle +image/emf emf +image/example +image/fits fits fit fts +image/g3fax +image/gif gif +image/ief ief +image/jls jls +image/jp2 jp2 jpg2 +image/jpeg jpg jpeg jpe jfif +image/jpm jpm jpgm +image/jpx jpx jpf +image/ktx ktx +image/naplps +image/png png +image/prs.btif btif btf +image/prs.pti pti +image/pwg-raster +image/svg+xml svg svgz +image/t38 t38 +image/tiff tiff tif +image/tiff-fx tfx +image/vnd.adobe.photoshop psd +image/vnd.airzip.accelerator.azv azv +image/vnd.cns.inf2 +image/vnd.dece.graphic uvi uvvi uvg uvvg +image/vnd.djvu djvu djv +# sub: text/vnd.dvb.subtitle +image/vnd.dvb.subtitle +image/vnd.dwg dwg +image/vnd.dxf dxf +image/vnd.fastbidsheet fbs +image/vnd.fpx fpx +image/vnd.fst fst +image/vnd.fujixerox.edmics-mmr mmr +image/vnd.fujixerox.edmics-rlc rlc +image/vnd.globalgraphics.pgb pgb +image/vnd.microsoft.icon ico +image/vnd.mix +image/vnd.mozilla.apng apng +image/vnd.ms-modi mdi +image/vnd.net-fpx +image/vnd.radiance hdr rgbe xyze +image/vnd.sealed.png spng spn s1n +image/vnd.sealedmedia.softseal.gif sgif sgi s1g +image/vnd.sealedmedia.softseal.jpg sjpg sjp s1j +image/vnd.svf +image/vnd.tencent.tap tap +image/vnd.valve.source.texture vtf +image/vnd.wap.wbmp wbmp +image/vnd.xiff xif +image/vnd.zbrush.pcx pcx +image/wmf wmf +message/CPIM +message/delivery-status +message/disposition-notification +message/example +message/external-body +message/feedback-report +message/global u8msg +message/global-delivery-status u8dsn +message/global-disposition-notification u8mdn +message/global-headers u8hdr +message/http +# cl: application/simple-filter+xml +message/imdn+xml +# message/news obsoleted by message/rfc822 +message/partial +message/rfc822 eml mail art +message/s-http +message/sip +message/sipfrag +message/tracking-status +message/vnd.si.simp +# wsc: application/vnd.wfa.wsc +message/vnd.wfa.wsc +model/example +model/gltf+json gltf +model/iges igs iges +model/mesh msh mesh silo +model/vnd.collada+xml dae +model/vnd.dwf dwf +# 3dml, 3dm: text/vnd.in3d.3dml +model/vnd.flatland.3dml +model/vnd.gdl gdl gsm win dor lmp rsm msm ism +model/vnd.gs-gdl +model/vnd.gtw gtw +model/vnd.moml+xml moml +model/vnd.mts mts +model/vnd.opengex ogex +model/vnd.parasolid.transmit.binary x_b xmt_bin +model/vnd.parasolid.transmit.text x_t xmt_txt +model/vnd.rosette.annotated-data-model +model/vnd.valve.source.compiled-map bsp +model/vnd.vtu vtu +model/vrml wrl vrml +# x3db: model/x3d+xml +model/x3d+fastinfoset +# x3d: application/vnd.hzn-3d-crossword +model/x3d+xml x3db +model/x3d-vrml x3dv x3dvz +multipart/alternative +multipart/appledouble +multipart/byteranges +multipart/digest +multipart/encrypted +multipart/form-data +multipart/header-set +multipart/mixed +multipart/parallel +multipart/related +multipart/report +multipart/signed +multipart/vnd.bint.med-plus bmed +multipart/voice-message vpm +multipart/x-mixed-replace +text/1d-interleaved-parityfec +text/cache-manifest appcache manifest +text/calendar ics ifb +text/css css +text/csv csv +text/csv-schema csvs +text/directory +text/dns soa zone +text/encaprtp +# text/ecmascript obsoleted by 
application/ecmascript +text/enriched +text/example +text/fwdred +text/grammar-ref-list +text/html html htm +# text/javascript obsoleted by application/javascript +text/jcr-cnd cnd +text/markdown markdown md +text/mizar miz +text/n3 n3 +text/parameters +text/parityfec +text/plain txt asc text pm el c h cc hh cxx hxx f90 conf log +text/provenance-notation provn +text/prs.fallenstein.rst rst +text/prs.lines.tag tag dsc +text/prs.prop.logic +text/raptorfec +text/RED +text/rfc822-headers +text/richtext rtx +# rtf: application/rtf +text/rtf +text/rtp-enc-aescm128 +text/rtploopback +text/rtx +text/sgml sgml sgm +text/strings +text/t140 +text/tab-separated-values tsv +text/troff t tr roff +text/turtle ttl +text/ulpfec +text/uri-list uris uri +text/vcard vcf vcard +text/vnd.a a +text/vnd.abc abc +text/vnd.ascii-art ascii +# curl: application/vnd.curl +text/vnd.curl +text/vnd.debian.copyright copyright +text/vnd.DMClientScript dms +text/vnd.dvb.subtitle sub +text/vnd.esmertec.theme-descriptor jtd +text/vnd.fly fly +text/vnd.fmi.flexstor flx +text/vnd.graphviz gv dot +text/vnd.in3d.3dml 3dml 3dm +text/vnd.in3d.spot spot spo +text/vnd.IPTC.NewsML +text/vnd.IPTC.NITF +text/vnd.latex-z +text/vnd.motorola.reflex +text/vnd.ms-mediapackage mpf +text/vnd.net2phone.commcenter.command ccc +text/vnd.radisys.msml-basic-layout +text/vnd.si.uricatalogue uric +text/vnd.sun.j2me.app-descriptor jad +text/vnd.trolltech.linguist ts +text/vnd.wap.si si +text/vnd.wap.sl sl +text/vnd.wap.wml wml +text/vnd.wap.wmlscript wmls +text/xml xml xsd rng +text/xml-external-parsed-entity ent +video/1d-interleaved-parityfec +video/3gpp 3gp 3gpp +video/3gpp2 3g2 3gpp2 +video/3gpp-tt +video/BMPEG +video/BT656 +video/CelB +video/DV +video/encaprtp +video/example +video/H261 +video/H263 +video/H263-1998 +video/H263-2000 +video/H264 +video/H264-RCDO +video/H264-SVC +video/H265 +video/iso.segment m4s +video/JPEG +video/jpeg2000 +video/mj2 mj2 mjp2 +video/MP1S +video/MP2P +video/MP2T +video/mp4 mp4 mpg4 m4v +video/MP4V-ES +video/mpeg mpeg mpg mpe m1v m2v +video/mpeg4-generic +video/MPV +video/nv +video/ogg ogv +video/parityfec +video/pointer +video/quicktime mov qt +video/raptorfec +video/raw +video/rtp-enc-aescm128 +video/rtploopback +video/rtx +video/SMPTE292M +video/ulpfec +video/vc1 +video/vnd.CCTV +video/vnd.dece.hd uvh uvvh +video/vnd.dece.mobile uvm uvvm +video/vnd.dece.mp4 uvu uvvu +video/vnd.dece.pd uvp uvvp +video/vnd.dece.sd uvs uvvs +video/vnd.dece.video uvv uvvv +video/vnd.directv.mpeg +video/vnd.directv.mpeg-tts +video/vnd.dlna.mpeg-tts +video/vnd.dvb.file dvb +video/vnd.fvt fvt +# rm: audio/x-pn-realaudio +video/vnd.hns.video +video/vnd.iptvforum.1dparityfec-1010 +video/vnd.iptvforum.1dparityfec-2005 +video/vnd.iptvforum.2dparityfec-1010 +video/vnd.iptvforum.2dparityfec-2005 +video/vnd.iptvforum.ttsavc +video/vnd.iptvforum.ttsmpeg2 +video/vnd.motorola.video +video/vnd.motorola.videop +video/vnd.mpegurl mxu m4u +video/vnd.ms-playready.media.pyv pyv +video/vnd.nokia.interleaved-multimedia nim +video/vnd.nokia.videovoip +# mp4: video/mp4 +video/vnd.objectvideo +video/vnd.radgamettools.bink bik bk2 +video/vnd.radgamettools.smacker smk +video/vnd.sealed.mpeg1 smpg s11 +# smpg: video/vnd.sealed.mpeg1 +video/vnd.sealed.mpeg4 s14 +video/vnd.sealed.swf sswf ssw +video/vnd.sealedmedia.softseal.mov smov smo s1q +# uvu, uvvu: video/vnd.dece.mp4 +video/vnd.uvvu.mp4 +video/vnd.vivo viv +video/VP8 + +# Non-IANA types + +application/mac-compactpro cpt +application/metalink+xml metalink +application/owl+xml owx +application/rss+xml rss 
+application/vnd.android.package-archive apk +application/vnd.oma.dd+xml dd +application/vnd.oma.drm.content dcf +# odf: application/vnd.oasis.opendocument.formula +application/vnd.oma.drm.dcf o4a o4v +application/vnd.oma.drm.message dm +application/vnd.oma.drm.rights+wbxml drc +application/vnd.oma.drm.rights+xml dr +application/vnd.sun.xml.calc sxc +application/vnd.sun.xml.calc.template stc +application/vnd.sun.xml.draw sxd +application/vnd.sun.xml.draw.template std +application/vnd.sun.xml.impress sxi +application/vnd.sun.xml.impress.template sti +application/vnd.sun.xml.math sxm +application/vnd.sun.xml.writer sxw +application/vnd.sun.xml.writer.global sxg +application/vnd.sun.xml.writer.template stw +application/vnd.symbian.install sis +application/vnd.wap.mms-message mms +application/x-annodex anx +application/x-bcpio bcpio +application/x-bittorrent torrent +application/x-bzip2 bz2 +application/x-cdlink vcd +application/x-chrome-extension crx +application/x-cpio cpio +application/x-csh csh +application/x-director dcr dir dxr +application/x-dvi dvi +application/x-futuresplash spl +application/x-gtar gtar +application/x-hdf hdf +application/x-java-archive jar +application/x-java-jnlp-file jnlp +application/x-java-pack200 pack +application/x-killustrator kil +application/x-latex latex +application/x-netcdf nc cdf +application/x-perl pl +application/x-rpm rpm +application/x-sh sh +application/x-shar shar +application/x-stuffit sit +application/x-sv4cpio sv4cpio +application/x-sv4crc sv4crc +application/x-tar tar +application/x-tcl tcl +application/x-tex tex +application/x-texinfo texinfo texi +application/x-troff-man man 1 2 3 4 5 6 7 8 +application/x-troff-me me +application/x-troff-ms ms +application/x-ustar ustar +application/x-wais-source src +application/x-xpinstall xpi +application/x-xspf+xml xspf +application/x-xz xz +audio/midi mid midi kar +audio/x-aiff aif aiff aifc +audio/x-annodex axa +audio/x-flac flac +audio/x-matroska mka +audio/x-mod mod ult uni m15 mtm 669 med +audio/x-mpegurl m3u +audio/x-ms-wax wax +audio/x-ms-wma wma +audio/x-pn-realaudio ram rm +audio/x-realaudio ra +audio/x-s3m s3m +audio/x-stm stm +audio/x-wav wav +chemical/x-xyz xyz +image/webp webp +image/x-cmu-raster ras +image/x-portable-anymap pnm +image/x-portable-bitmap pbm +image/x-portable-graymap pgm +image/x-portable-pixmap ppm +image/x-rgb rgb +image/x-targa tga +image/x-xbitmap xbm +image/x-xpixmap xpm +image/x-xwindowdump xwd +text/html-sandboxed sandboxed +text/x-pod pod +text/x-setext etx +video/webm webm +video/x-annodex axv +video/x-flv flv +video/x-javafx fxm +video/x-matroska mkv +video/x-matroska-3d mk3d +video/x-ms-asf asx +video/x-ms-wm wm +video/x-ms-wmv wmv +video/x-ms-wmx wmx +video/x-ms-wvx wvx +video/x-msvideo avi +video/x-sgi-movie movie +x-conference/x-cooltalk ice +x-epoc/x-sisx-app sisx diff --git a/images/e2e/Dockerfile b/images/e2e/Dockerfile new file mode 100644 index 0000000000..5501e48f2d --- /dev/null +++ b/images/e2e/Dockerfile @@ -0,0 +1,79 @@ +# Copyright 2018 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +FROM quay.io/kubernetes-ingress-controller/nginx-amd64:0.87 + +RUN clean-install \ + g++ \ + gcc \ + git \ + libc6-dev \ + make \ + wget \ + luarocks \ + python \ + pkg-config + +ENV GOLANG_VERSION 1.12.5 +ENV GO_ARCH linux-amd64 +ENV GOLANG_SHA aea86e3c73495f205929cfebba0d63f1382c8ac59be081b6351681415f4063cf + +RUN set -eux; \ + url="https://golang.org/dl/go${GOLANG_VERSION}.${GO_ARCH}.tar.gz"; \ + wget -O go.tgz "$url"; \ + echo "${GOLANG_SHA} *go.tgz" | sha256sum -c -; \ + tar -C /usr/local -xzf go.tgz; \ + rm go.tgz; \ + export PATH="/usr/local/go/bin:$PATH"; \ + go version + +ENV GOPATH /go +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH + +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" + +WORKDIR $GOPATH + +ENV RESTY_CLI_VERSION 0.23 +ENV RESTY_CLI_SHA 8715b9a6e33140fd468779cd561c0c622f7444a902dc578b1137941ff3fc1ed8 + +RUN set -eux; \ + url="https://github.com/openresty/resty-cli/archive/v${RESTY_CLI_VERSION}.tar.gz"; \ + wget -O resty_cli.tgz "$url"; \ + echo "${RESTY_CLI_SHA} *resty_cli.tgz" | sha256sum -c -; \ + tar -C /tmp -xzf resty_cli.tgz; \ + rm resty_cli.tgz; \ + mv /tmp/resty-cli-${RESTY_CLI_VERSION}/bin/* /usr/local/bin/; \ + resty -V + +RUN luarocks install luacheck \ + && luarocks install busted + +RUN go get github.com/onsi/ginkgo/ginkgo \ + && go get golang.org/x/lint/golint + +ARG K8S_RELEASE +ARG ETCD_VERSION + +RUN wget https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kubectl -O /usr/local/bin/kubectl \ + && chmod +x /usr/local/bin/kubectl + +RUN wget https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kube-apiserver -O /usr/local/bin/kube-apiserver \ + && chmod +x /usr/local/bin/kube-apiserver + +RUN curl -L https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz \ + && mkdir -p /tmp/etcd-download \ + && tar xzvf /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1 \ + && cp /tmp/etcd-download/etcd /usr/local/bin \ + && rm -rf /tmp/etcd-download diff --git a/images/e2e/Makefile b/images/e2e/Makefile new file mode 100644 index 0000000000..6c303c1738 --- /dev/null +++ b/images/e2e/Makefile @@ -0,0 +1,33 @@ +# Copyright 2018 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TAG ?=v$(shell date +%m%d%Y)-$(shell git rev-parse --short HEAD) +REGISTRY ?= quay.io/kubernetes-ingress-controller +DOCKER ?= docker + +IMAGE = $(REGISTRY)/e2e + +all: docker-build docker-push + +docker-build: + $(DOCKER) build \ + --pull \ + --build-arg K8S_RELEASE=v1.14.1 \ + --build-arg ETCD_VERSION=v3.3.12 \ + -t $(IMAGE):$(TAG) . 
+ +docker-push: + $(DOCKER) push $(IMAGE):$(TAG) + $(DOCKER) tag $(IMAGE):$(TAG) $(IMAGE):latest + $(DOCKER) push $(IMAGE):latest diff --git a/images/echoheaders/Dockerfile b/images/echoheaders/Dockerfile deleted file mode 100644 index fd4c641bb6..0000000000 --- a/images/echoheaders/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM BASEIMAGE - -ADD nginx.conf /etc/nginx/nginx.conf -ADD template.lua /usr/local/share/lua/5.1/ -ADD README.md README.md diff --git a/images/echoheaders/Makefile b/images/echoheaders/Makefile deleted file mode 100644 index 4ad95cdb78..0000000000 --- a/images/echoheaders/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TAG = 1.8 -REGISTRY = gcr.io/google_containers -ARCH ?= $(shell go env GOARCH) -ALL_ARCH = amd64 arm ppc64le - -QEMUVERSION=v2.7.0 - -IMGNAME = echoserver -IMAGE = $(REGISTRY)/$(IMGNAME) -MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) - -# Set default base image dynamically for each arch -BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.21 - -TEMP_DIR := $(shell mktemp -d) - -all: all-container - -sub-container-%: - $(MAKE) ARCH=$* container - -sub-push-%: - $(MAKE) ARCH=$* push - -all-container: $(addprefix sub-container-,$(ALL_ARCH)) - -all-push: $(addprefix sub-push-,$(ALL_ARCH)) - -container: .container-$(ARCH) -.container-$(ARCH): - cp ./* $(TEMP_DIR) - cd $(TEMP_DIR) && sed -i 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile - - docker build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR) - -ifeq ($(ARCH), amd64) - # This is for to maintain the backward compatibility - docker tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) -endif - -push: .push-$(ARCH) -.push-$(ARCH): .container-$(ARCH) - gcloud docker -- push $(MULTI_ARCH_IMG):$(TAG) -ifeq ($(ARCH), amd64) - gcloud docker -- push $(IMAGE):$(TAG) -endif - -clean: $(addprefix sub-clean-,$(ALL_ARCH)) -sub-clean-%: - docker rmi -f $(IMAGE)-$*:$(TAG) || true diff --git a/images/echoheaders/README.md b/images/echoheaders/README.md deleted file mode 100644 index d023094ef2..0000000000 --- a/images/echoheaders/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Echoserver - -This is a simple server that responds with the http headers it received. - -Image versions >= 1.4 removes the redirect introduced in 1.3. -Image versions >= 1.3 redirect requests on :80 with `X-Forwarded-Proto: http` to :443. 
-Image versions > 1.0 run an nginx server, and implement the echoserver using lua in the nginx config. -Image versions <= 1.0 run a python http server instead of nginx, and don't redirect any requests. diff --git a/images/echoheaders/echo-app.yaml b/images/echoheaders/echo-app.yaml deleted file mode 100644 index 4ffe68134b..0000000000 --- a/images/echoheaders/echo-app.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: echoheaders - labels: - app: echoheaders -spec: - type: NodePort - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - name: http - selector: - app: echoheaders ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: echoheaders - labels: - app: echoheaders -spec: - replicas: 1 - template: - metadata: - labels: - app: echoheaders - spec: - containers: - - name: echoheaders - image: gcr.io/google_containers/echoserver:1.7 - ports: - - containerPort: 8080 - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP diff --git a/images/echoheaders/nginx.conf b/images/echoheaders/nginx.conf deleted file mode 100644 index 7543612e48..0000000000 --- a/images/echoheaders/nginx.conf +++ /dev/null @@ -1,83 +0,0 @@ -events { - worker_connections 1024; -} - -env HOSTNAME; -env NODE_NAME; -env POD_NAME; -env POD_NAMESPACE; -env POD_IP; - -http { - default_type 'text/plain'; - # maximum allowed size of the client request body. By default this is 1m. - # Request with bigger bodies nginx will return error code 413. - # http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size - client_max_body_size 10m; - - init_by_lua_block { - local template = require("template") - -- template syntax documented here: - -- https://github.com/bungle/lua-resty-template/blob/master/README.md - tmpl = template.compile([[ - - -Hostname: {{os.getenv("HOSTNAME") or "N/A"}} - -Pod Information: -{% if os.getenv("POD_NAME") then %} - node name: {{os.getenv("NODE_NAME") or "N/A"}} - pod name: {{os.getenv("POD_NAME") or "N/A"}} - pod namespace: {{os.getenv("POD_NAMESPACE") or "N/A"}} - pod IP: {{os.getenv("POD_IP") or "N/A"}} -{% else %} - -no pod information available- -{% end %} - -Server values: - server_version=nginx: {{ngx.var.nginx_version}} - lua: {{ngx.config.ngx_lua_version}} - -Request Information: - client_address={{ngx.var.remote_addr}} - method={{ngx.req.get_method()}} - real path={{ngx.var.request_uri}} - query={{ngx.var.query_string or ""}} - request_version={{ngx.req.http_version()}} - request_uri={{ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri}} - -Request Headers: -{% for i, key in ipairs(keys) do %} - {{key}}={{headers[key]}} -{% end %} - -Request Body: -{{ngx.var.request_body or " -no body in request-"}} -]]) - } - - server { - # please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1 - # basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT - # socket option), allowing a kernel to distribute incoming connections between worker processes. - listen 8080 default_server reuseport; - - # Replace '_' with your hostname. 
- server_name _; - - location / { - lua_need_request_body on; - content_by_lua_block { - ngx.header["Server"] = "echoserver" - - local headers = ngx.req.get_headers() - local keys = {} - for key, val in pairs(headers) do - table.insert(keys, key) - end - table.sort(keys) - - ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers})) - } - } - } -} diff --git a/images/echoheaders/template.lua b/images/echoheaders/template.lua deleted file mode 100644 index cc80308a75..0000000000 --- a/images/echoheaders/template.lua +++ /dev/null @@ -1,509 +0,0 @@ --- vendored from https://raw.githubusercontent.com/bungle/lua-resty-template/1f9a5c24fc7572dbf5be0b9f8168cc3984b03d24/lib/resty/template.lua --- only modification: remove / from HTML_ENTITIES to not escape it, and fix the appropriate regex. ---[[ -Copyright (c) 2014 - 2017 Aapo Talvensaari -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -* Neither the name of the {organization} nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---]] - -local setmetatable = setmetatable -local loadstring = loadstring -local loadchunk -local tostring = tostring -local setfenv = setfenv -local require = require -local capture -local concat = table.concat -local assert = assert -local prefix -local write = io.write -local pcall = pcall -local phase -local open = io.open -local load = load -local type = type -local dump = string.dump -local find = string.find -local gsub = string.gsub -local byte = string.byte -local null -local sub = string.sub -local ngx = ngx -local jit = jit -local var - -local _VERSION = _VERSION -local _ENV = _ENV -local _G = _G - -local HTML_ENTITIES = { - ["&"] = "&", - ["<"] = "<", - [">"] = ">", - ['"'] = """, - ["'"] = "'", -} - -local CODE_ENTITIES = { - ["{"] = "{", - ["}"] = "}", - ["&"] = "&", - ["<"] = "<", - [">"] = ">", - ['"'] = """, - ["'"] = "'", - ["/"] = "/" -} - -local VAR_PHASES - -local ok, newtab = pcall(require, "table.new") -if not ok then newtab = function() return {} end end - -local caching = true -local template = newtab(0, 12) - -template._VERSION = "1.9" -template.cache = {} - -local function enabled(val) - if val == nil then return true end - return val == true or (val == "1" or val == "true" or val == "on") -end - -local function trim(s) - return gsub(gsub(s, "^%s+", ""), "%s+$", "") -end - -local function rpos(view, s) - while s > 0 do - local c = sub(view, s, s) - if c == " " or c == "\t" or c == "\0" or c == "\x0B" then - s = s - 1 - else - break - end - end - return s -end - -local function escaped(view, s) - if s > 1 and sub(view, s - 1, s - 1) == "\\" then - if s > 2 and sub(view, s - 2, s - 2) == "\\" then - return false, 1 - else - return true, 1 - end - end - return false, 0 -end - -local function readfile(path) - local file = open(path, "rb") - if not file then return nil end - local content = file:read "*a" - file:close() - return content -end - -local function loadlua(path) - return readfile(path) or path -end - -local function loadngx(path) - local vars = VAR_PHASES[phase()] - local file, location = path, vars and var.template_location - if sub(file, 1) == "/" then file = sub(file, 2) end - if location and location ~= "" then - if sub(location, -1) == "/" then location = sub(location, 1, -2) end - local res = capture(concat{ location, '/', file}) - if res.status == 200 then return res.body end - end - local root = vars and (var.template_root or var.document_root) or prefix - if sub(root, -1) == "/" then root = sub(root, 1, -2) end - return readfile(concat{ root, "/", file }) or path -end - -do - if ngx then - VAR_PHASES = { - set = true, - rewrite = true, - access = true, - content = true, - header_filter = true, - body_filter = true, - log = true - } - template.print = ngx.print or write - template.load = loadngx - prefix, var, capture, null, phase = ngx.config.prefix(), ngx.var, ngx.location.capture, ngx.null, ngx.get_phase - if VAR_PHASES[phase()] then - caching = enabled(var.template_cache) - end - else - template.print = write - template.load = loadlua - end - if _VERSION == "Lua 5.1" then - local context = { __index = function(t, k) - return t.context[k] or t.template[k] or _G[k] - end } - if jit then - loadchunk = function(view) - return assert(load(view, nil, nil, setmetatable({ template = template }, context))) - end - else - loadchunk = function(view) - local func = assert(loadstring(view)) - setfenv(func, setmetatable({ template = template }, context)) - return func - end - end - else - local context = { __index = function(t, k) - return t.context[k] or 
t.template[k] or _ENV[k] - end } - loadchunk = function(view) - return assert(load(view, nil, nil, setmetatable({ template = template }, context))) - end - end -end - -function template.caching(enable) - if enable ~= nil then caching = enable == true end - return caching -end - -function template.output(s) - if s == nil or s == null then return "" end - if type(s) == "function" then return template.output(s()) end - return tostring(s) -end - -function template.escape(s, c) - if type(s) == "string" then - if c then return gsub(s, "[}{\">/<'&]", CODE_ENTITIES) end - return gsub(s, "[\"><'&]", HTML_ENTITIES) - end - return template.output(s) -end - -function template.new(view, layout) - assert(view, "view was not provided for template.new(view, layout).") - local render, compile = template.render, template.compile - if layout then - if type(layout) == "table" then - return setmetatable({ render = function(self, context) - local context = context or self - context.blocks = context.blocks or {} - context.view = compile(view)(context) - layout.blocks = context.blocks or {} - layout.view = context.view or "" - return layout:render() - end }, { __tostring = function(self) - local context = self - context.blocks = context.blocks or {} - context.view = compile(view)(context) - layout.blocks = context.blocks or {} - layout.view = context.view - return tostring(layout) - end }) - else - return setmetatable({ render = function(self, context) - local context = context or self - context.blocks = context.blocks or {} - context.view = compile(view)(context) - return render(layout, context) - end }, { __tostring = function(self) - local context = self - context.blocks = context.blocks or {} - context.view = compile(view)(context) - return compile(layout)(context) - end }) - end - end - return setmetatable({ render = function(self, context) - return render(view, context or self) - end }, { __tostring = function(self) - return compile(view)(self) - end }) -end - -function template.precompile(view, path, strip) - local chunk = dump(template.compile(view), strip ~= false) - if path then - local file = open(path, "wb") - file:write(chunk) - file:close() - end - return chunk -end - -function template.compile(view, key, plain) - assert(view, "view was not provided for template.compile(view, key, plain).") - if key == "no-cache" then - return loadchunk(template.parse(view, plain)), false - end - key = key or view - local cache = template.cache - if cache[key] then return cache[key], true end - local func = loadchunk(template.parse(view, plain)) - if caching then cache[key] = func end - return func, false -end - -function template.parse(view, plain) - assert(view, "view was not provided for template.parse(view, plain).") - if not plain then - view = template.load(view) - if byte(view, 1, 1) == 27 then return view end - end - local j = 2 - local c = {[[ -context=... 
or {} -local function include(v, c) return template.compile(v)(c or context) end -local ___,blocks,layout={},blocks or {} -]] } - local i, s = 1, find(view, "{", 1, true) - while s do - local t, p = sub(view, s + 1, s + 1), s + 2 - if t == "{" then - local e = find(view, "}}", p, true) - if e then - local z, w = escaped(view, s) - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - if z then - i = s - else - c[j] = "___[#___+1]=template.escape(" - c[j+1] = trim(sub(view, p, e - 1)) - c[j+2] = ")\n" - j=j+3 - s, i = e + 1, e + 2 - end - end - elseif t == "*" then - local e = find(view, "*}", p, true) - if e then - local z, w = escaped(view, s) - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - if z then - i = s - else - c[j] = "___[#___+1]=template.output(" - c[j+1] = trim(sub(view, p, e - 1)) - c[j+2] = ")\n" - j=j+3 - s, i = e + 1, e + 2 - end - end - elseif t == "%" then - local e = find(view, "%}", p, true) - if e then - local z, w = escaped(view, s) - if z then - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - i = s - else - local n = e + 2 - if sub(view, n, n) == "\n" then - n = n + 1 - end - local r = rpos(view, s - 1) - if i <= r then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, r) - c[j+2] = "]=]\n" - j=j+3 - end - c[j] = trim(sub(view, p, e - 1)) - c[j+1] = "\n" - j=j+2 - s, i = n - 1, n - end - end - elseif t == "(" then - local e = find(view, ")}", p, true) - if e then - local z, w = escaped(view, s) - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - if z then - i = s - else - local f = sub(view, p, e - 1) - local x = find(f, ",", 2, true) - if x then - c[j] = "___[#___+1]=include([=[" - c[j+1] = trim(sub(f, 1, x - 1)) - c[j+2] = "]=]," - c[j+3] = trim(sub(f, x + 1)) - c[j+4] = ")\n" - j=j+5 - else - c[j] = "___[#___+1]=include([=[" - c[j+1] = trim(f) - c[j+2] = "]=])\n" - j=j+3 - end - s, i = e + 1, e + 2 - end - end - elseif t == "[" then - local e = find(view, "]}", p, true) - if e then - local z, w = escaped(view, s) - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - if z then - i = s - else - c[j] = "___[#___+1]=include(" - c[j+1] = trim(sub(view, p, e - 1)) - c[j+2] = ")\n" - j=j+3 - s, i = e + 1, e + 2 - end - end - elseif t == "-" then - local e = find(view, "-}", p, true) - if e then - local x, y = find(view, sub(view, s, e + 1), e + 2, true) - if x then - local z, w = escaped(view, s) - if z then - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - i = s - else - y = y + 1 - x = x - 1 - if sub(view, y, y) == "\n" then - y = y + 1 - end - local b = trim(sub(view, p, e - 1)) - if b == "verbatim" or b == "raw" then - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - c[j] = "___[#___+1]=[=[" - c[j+1] = sub(view, e + 2, x) - c[j+2] = "]=]\n" - j=j+3 - else - if sub(view, x, x) == "\n" then - x = x - 1 - end - local r = rpos(view, s - 1) - if i <= r then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, r) - c[j+2] = "]=]\n" - j=j+3 - end - c[j] = 'blocks["' - c[j+1] = b - c[j+2] = '"]=include[=[' - c[j+3] = sub(view, e + 2, x) - c[j+4] = "]=]\n" - j=j+5 - end - s, i = y - 1, y - end - end - end - 
elseif t == "#" then - local e = find(view, "#}", p, true) - if e then - local z, w = escaped(view, s) - if i < s - w then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = sub(view, i, s - 1 - w) - c[j+2] = "]=]\n" - j=j+3 - end - if z then - i = s - else - e = e + 2 - if sub(view, e, e) == "\n" then - e = e + 1 - end - s, i = e - 1, e - end - end - end - s = find(view, "{", s + 1, true) - end - s = sub(view, i) - if s and s ~= "" then - c[j] = "___[#___+1]=[=[\n" - c[j+1] = s - c[j+2] = "]=]\n" - j=j+3 - end - c[j] = "return layout and include(layout,setmetatable({view=table.concat(___),blocks=blocks},{__index=context})) or table.concat(___)" - return concat(c) -end - -function template.render(view, context, key, plain) - assert(view, "view was not provided for template.render(view, context, key, plain).") - return template.print(template.compile(view, key, plain)(context)) -end - -return template \ No newline at end of file diff --git a/images/grpc-fortune-teller/.gitignore b/images/grpc-fortune-teller/.gitignore new file mode 100644 index 0000000000..ac51a054d2 --- /dev/null +++ b/images/grpc-fortune-teller/.gitignore @@ -0,0 +1 @@ +bazel-* diff --git a/images/grpc-fortune-teller/BUILD.bazel b/images/grpc-fortune-teller/BUILD.bazel new file mode 100644 index 0000000000..b000f8347d --- /dev/null +++ b/images/grpc-fortune-teller/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "gazelle", "go_prefix") + +gazelle( + name = "gazelle", + args = [ + "-build_file_name", + "BUILD,BUILD.bazel", + ], + command = "fix", + prefix = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller", +) + +go_prefix( + prefix = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller", +) diff --git a/images/grpc-fortune-teller/Gopkg.lock b/images/grpc-fortune-teller/Gopkg.lock new file mode 100644 index 0000000000..e25ff02ddd --- /dev/null +++ b/images/grpc-fortune-teller/Gopkg.lock @@ -0,0 +1,104 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/vromero/gofortune" + packages = [ + "lib", + "lib/fortune" + ] + revision = "8a3c485287c0d3d3e4f8f0d9e0058f6cdd29740d" + version = "v0.0.1" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "lex/httplex", + "trace" + ] + revision = "b3c676e531a6dc479fa1b35ac961c13f5e2b4d2e" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "35de2414665fc36f56b72d982c5af480d86de5ab" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "reflection", + "reflection/grpc_reflection_v1alpha", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport" + ] + revision = "d89cded64628466c4ab532d1f0ba5c220459ebe8" + version = "v1.11.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "fcd36504ba120cfbbe161eac2ddd439c884fcef61ce318b5c0a544d27bc47043" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/images/grpc-fortune-teller/README.md b/images/grpc-fortune-teller/README.md new file mode 100644 index 0000000000..5cc755ef6a --- /dev/null +++ b/images/grpc-fortune-teller/README.md @@ -0,0 +1,59 @@ + +# grpc-fortune-teller + +This is the grpc server application for the nginx-ingress grpc example. Bazel +0.12 is used for the building and container management. + +## Build + +Builds a statically compiled go binary cross-compiled for linux: + +``` +$ bazel build //app:fortune +Target //app:fortune up-to-date: + bazel-bin/app/linux_amd64_static_pure_stripped/fortune +``` + +> To build for your host system, comment out the `goos` and `goarch` attributes +> in the `go_binary` rule. 
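+
+For reference, a host-platform variant of the rule might look like the sketch
+below (the `fortune_host` target name is hypothetical; the other attributes
+mirror the `go_binary` rule in `app/BUILD.bazel`):
+
+```
+go_binary(
+    name = "fortune_host",           # hypothetical host-platform target
+    embed = [":go_default_library"],
+    pure = "on",                     # keep the static, cgo-free build
+    static = "on",
+    # goos/goarch omitted: rules_go then builds for the host platform
+)
+```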
+ +## Run + +Builds a minimal docker image that wraps the go_binary, loads it into your local +docker image repository, and runs it: + +```sh +$ bazel run //app:image +Loaded image ID: sha256:aa597c897c873116fcbfccafecf5ab0f6f4178a05e4a00c8e79de91ac0d2e9e7 +Tagging aa597c897c873116fcbfccafecf5ab0f6f4178a05e4a00c8e79de91ac0d2e9e7 as bazel/app:image +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/fortunes.dat +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/literature +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/literature.dat +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/riddles +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/riddles.dat +2018/05/01 02:13:43 Restored /tmp/fortune-teller/usr/share/games/fortunes/fortunes +2018/05/01 02:13:43 Assets restored to /tmp/fortune-teller +2018/05/01 02:13:43 Listening for gRPC requests at 50051 +``` + +Or run it via docker: + +```sh +$ docker run bazel/app:image +``` + +Build image and push to the container registry specified in the `container_push` +rule: + +```sh +$ bazel run //app:push +``` + +## Invoke + +```sh +$ grpcurl -plaintext localhost:50051 build.stack.fortune.FortuneTeller/Predict +{ + "message": "Whenever the literary German dives into a sentence, that is the last\nyou are going to see of him until he emerges on the other side of his\nAtlantic with his verb in his mouth.\n\t\t-- Mark Twain \"A Connecticut Yankee in King Arthur's Court\"" +} +``` diff --git a/images/grpc-fortune-teller/WORKSPACE b/images/grpc-fortune-teller/WORKSPACE new file mode 100644 index 0000000000..a6960d0540 --- /dev/null +++ b/images/grpc-fortune-teller/WORKSPACE @@ -0,0 +1,62 @@ +workspace(name = "com_github_kubernetes_ingress_nginx_images_grpc_fortune_teller") + +##################################################################### +# RULES_GO +##################################################################### + +git_repository( + name = "io_bazel_rules_go", + remote = "https://github.com/bazelbuild/rules_go.git", + commit = "161c91485b007c6bf51c0e81808cf4ee2ded299d", +) + +http_archive( + name = "com_github_scele_rules_go_dep", + urls = ["https://github.com/scele/rules_go_dep/archive/49a5e4ca9f6a16c9b4c930a51ce3a537498bb4e1.tar.gz"], + strip_prefix = "rules_go_dep-49a5e4ca9f6a16c9b4c930a51ce3a537498bb4e1", + sha256 = "f170d3d6f55e216f1493f975cde6c489d7070da2a8a41fd4de9812d96f4fb38b", +) + +load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@com_github_scele_rules_go_dep//dep:dep.bzl", "dep_import") + +go_register_toolchains(go_version = "1.10.1") + +go_rules_dependencies() + +dep_import( + name = "godeps", + prefix = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller", + gopkg_lock = "//:Gopkg.lock", +) + +load("@godeps//:Gopkg.bzl", "go_deps") + +go_deps() + +############################################################# +# RULES_DOCKER +############################################################# + +RULES_DOCKER_VERSION = "553d5506bb7325185950f91533b967da8f5bc536" + +http_archive( + name = "io_bazel_rules_docker", + url = "https://github.com/bazelbuild/rules_docker/archive/%s.zip" % RULES_DOCKER_VERSION, + strip_prefix = "rules_docker-" + RULES_DOCKER_VERSION, + sha256 = "e0b3d966f2a5c0fe921b6294df7c823afa63b4c439f0a7f3b9da3ed6534bab83", +) + +load( + "@io_bazel_rules_docker//container:container.bzl", + container_repositories = 
"repositories", +) + +container_repositories() + +load( + "@io_bazel_rules_docker//go:image.bzl", + go_image_repositories = "repositories", +) + +go_image_repositories() diff --git a/images/grpc-fortune-teller/app/BUILD.bazel b/images/grpc-fortune-teller/app/BUILD.bazel new file mode 100644 index 0000000000..4fffd481eb --- /dev/null +++ b/images/grpc-fortune-teller/app/BUILD.bazel @@ -0,0 +1,69 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_embed_data") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +load("@io_bazel_rules_docker//container:push.bzl", "container_push") + +# Concatenates the fortune databases to a single bundle. +# May need to adjust paths for your system (built on ubuntu 16.04). +# $ apt-get install fortune +genrule( + name = "tar", + outs = ["fortune.tar"], + cmd = " && ".join([ + "OUT=$$(pwd)/$@", + "tar -cvf $$OUT /usr/share/games/fortunes", + ]), +) + +# Generates a .go source file with the tarball content in +# the fortuneFiles variable. +go_embed_data( + name = "fortune_assets", + srcs = [ + ":tar", + ], + package = "main", + unpack = True, + var = "fortuneFiles", +) + +go_library( + name = "go_default_library", + srcs = [ + "main.go", + ":fortune_assets", # keep + ], + importpath = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller/app", + visibility = ["//visibility:private"], + deps = [ + "//proto/fortune:go_default_library", + "@com_github_vromero_gofortune//lib/fortune:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//reflection:go_default_library", + ], +) + +go_binary( + name = "fortune", + embed = [":go_default_library"], + goarch = "amd64", + goos = "linux", + pure = "on", + static = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "image", + binary = ":fortune", + visibility = ["//visibility:public"], +) + +container_push( + name = "push", + format = "Docker", + image = ":image", + registry = "quay.io", + repository = "kubernetes-ingress-controller/grpc-fortune-teller", + tag = "0.1", +) diff --git a/images/grpc-fortune-teller/app/main.go b/images/grpc-fortune-teller/app/main.go new file mode 100644 index 0000000000..d09799cbef --- /dev/null +++ b/images/grpc-fortune-teller/app/main.go @@ -0,0 +1,137 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "path" + + proto "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller/proto/fortune" + "github.com/vromero/gofortune/lib/fortune" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/reflection" +) + +const ( + grpcPort = 50051 +) + +func main() { + + baseDir := "/tmp/fortune-teller" + mustMkdirAll(baseDir) + + opts := []grpc.ServerOption{ + grpc.MaxConcurrentStreams(200), + } + + grpcServer := grpc.NewServer(opts...) 
+ + fortuneTeller := &FortuneTeller{ + fs: createFortuneFilesystemNodeDescriptor(baseDir), + } + proto.RegisterFortuneTellerServer(grpcServer, fortuneTeller) + + reflection.Register(grpcServer) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", grpcPort)) + if err != nil { + log.Fatalf("Error while starting grpc server: %v\n", err) + } + + log.Printf("Listening for gRPC requests at %d\n", grpcPort) + grpcServer.Serve(lis) +} + +// FortuneTeller - struct that will implement the grpc service interface. +type FortuneTeller struct { + fs *fortune.FileSystemNodeDescriptor +} + +// Predict - implementation for the grpc unary request method. +func (f *FortuneTeller) Predict(ctx context.Context, r *proto.PredictionRequest) (*proto.PredictionResponse, error) { + _, data, err := fortune.GetRandomFortune(*f.fs) + if err != nil { + return nil, grpc.Errorf(codes.Internal, "Unable to render fortune: %v", err) + } + return &proto.PredictionResponse{ + Message: data, + }, nil +} + +func createFortuneFilesystemNodeDescriptor(baseDir string) *fortune.FileSystemNodeDescriptor { + + // Restore the packed fortune data + fortuneDir := path.Join(baseDir, "usr/share/games/fortunes") + + mustRestore(baseDir, fortuneFiles, nil) + + // init gofortune fs + fs, err := fortune.LoadPaths([]fortune.ProbabilityPath{ + {Path: fortuneDir}, + }) + if err != nil { + log.Fatalf("Unable to load fortune paths: %v", err) + } + + fortune.SetProbabilities(&fs, true) // consider all equal probabilities + return &fs +} + +// mustRestore - Restore assets. +func mustRestore(baseDir string, assets map[string][]byte, mappings map[string]string) { + // unpack variable is provided by the go_embed data and is a + // map[string][]byte such as {"/usr/share/games/fortune/literature.dat": + // bytes... 
} + for basename, bytes := range assets { + if mappings != nil { + replacement := mappings[basename] + if replacement != "" { + basename = replacement + } + } + filename := path.Join(baseDir, basename) + dirname := path.Dir(filename) + //log.Printf("file %s, dir %s, rel %d, abs %s, absdir: %s", file, dir, rel, abs, absdir) + if err := os.MkdirAll(dirname, os.ModePerm); err != nil { + log.Fatalf("Failed to create asset dir %s: %v", dirname, err) + } + + if err := ioutil.WriteFile(filename, bytes, os.ModePerm); err != nil { + log.Fatalf("Failed to write asset %s: %v", filename, err) + } + log.Printf("Restored %s", filename) + } + + log.Printf("Assets restored to %s", baseDir) +} + +// mustMkdirAll - make all dirs and panic if fail +func mustMkdirAll(dirs ...string) { + for _, dir := range dirs { + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + panic(fmt.Sprintf("Failed mkdir %s: %v", dir, err)) + } + } +} diff --git a/images/grpc-fortune-teller/proto/fortune/BUILD.bazel b/images/grpc-fortune-teller/proto/fortune/BUILD.bazel new file mode 100644 index 0000000000..b3008b8265 --- /dev/null +++ b/images/grpc-fortune-teller/proto/fortune/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +go_library( + name = "go_default_library", + srcs = ["doc.go"], + embed = [":build_stack_fortune_go_proto"], # keep + importpath = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller/proto/fortune", + visibility = ["//visibility:public"], +) + +proto_library( + name = "build_stack_fortune_proto", + srcs = ["fortune.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "build_stack_fortune_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller/proto/fortune", + proto = ":build_stack_fortune_proto", + visibility = ["//visibility:public"], +) diff --git a/images/grpc-fortune-teller/proto/fortune/fortune.proto b/images/grpc-fortune-teller/proto/fortune/fortune.proto new file mode 100644 index 0000000000..d71b4acb1d --- /dev/null +++ b/images/grpc-fortune-teller/proto/fortune/fortune.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package build.stack.fortune; + +message PredictionRequest { +} + +message PredictionResponse { + string message = 1; +} + +service FortuneTeller { + rpc Predict(PredictionRequest) returns (PredictionResponse); +} \ No newline at end of file diff --git a/images/nginx/Dockerfile b/images/nginx/Dockerfile deleted file mode 100644 index 4144ae3a72..0000000000 --- a/images/nginx/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2015 The Kubernetes Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -FROM BASEIMAGE - -CROSS_BUILD_COPY qemu-ARCH-static /usr/bin/ - -COPY build.sh / - -RUN clean-install bash - -RUN /build.sh - -# Create symlinks to redirect nginx logs to stdout and stderr docker log collector -# This only works if nginx is started with CMD or ENTRYPOINT -RUN ln -sf /dev/stdout /var/log/nginx/access.log -RUN ln -sf /dev/stderr /var/log/nginx/error.log - -EXPOSE 80 443 - -CMD ["nginx", "-g", "daemon off;"] diff --git a/images/nginx/Makefile b/images/nginx/Makefile index 65dd5f1eee..65d259bff8 100644 --- a/images/nginx/Makefile +++ b/images/nginx/Makefile @@ -13,12 +13,12 @@ # limitations under the License. # 0.0.0 shouldn't clobber any released builds -TAG ?= 0.30 +TAG ?= 0.87 REGISTRY ?= quay.io/kubernetes-ingress-controller ARCH ?= $(shell go env GOARCH) -DOCKER ?= gcloud docker -- +DOCKER ?= docker -ALL_ARCH = amd64 arm arm64 ppc64le s390x +ALL_ARCH = amd64 arm64 SED_I?=sed -i GOHOSTOS ?= $(shell go env GOHOSTOS) @@ -26,27 +26,18 @@ ifeq ($(GOHOSTOS),darwin) SED_I=sed -i '' endif -QEMUVERSION=v2.9.1 +QEMUVERSION=v3.0.0 IMGNAME = nginx IMAGE = $(REGISTRY)/$(IMGNAME) MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) # Set default base image dynamically for each arch -BASEIMAGE?=gcr.io/google-containers/debian-base-$(ARCH):0.3 +BASEIMAGE?=quay.io/kubernetes-ingress-controller/debian-base-$(ARCH):0.1 -ifeq ($(ARCH),arm) - QEMUARCH=arm -endif ifeq ($(ARCH),arm64) QEMUARCH=aarch64 endif -ifeq ($(ARCH),ppc64le) - QEMUARCH=ppc64le -endif -ifeq ($(ARCH),s390x) - QEMUARCH=s390x -endif TEMP_DIR := $(shell mktemp -d) @@ -67,7 +58,7 @@ all-push: $(addprefix sub-push-,$(ALL_ARCH)) container: .container-$(ARCH) .container-$(ARCH): - cp ./* $(TEMP_DIR) + cp -r ./rootfs/* $(TEMP_DIR) cd $(TEMP_DIR) && $(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile cd $(TEMP_DIR) && $(SED_I) "s|ARCH|$(QEMUARCH)|g" Dockerfile @@ -76,19 +67,22 @@ ifeq ($(ARCH),amd64) cd $(TEMP_DIR) && $(SED_I) "/CROSS_BUILD_/d" Dockerfile else # When cross-building, only the placeholder "CROSS_BUILD_" should be removed - # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel - $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR) cd $(TEMP_DIR) && $(SED_I) "s/CROSS_BUILD_//g" Dockerfile endif - $(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR) + $(DOCKER) build --no-cache -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR) ifeq ($(ARCH), amd64) # This is for to maintain the backward compatibility $(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) endif +.PHONY: register-qemu +register-qemu: + # Register /usr/bin/qemu-ARCH-static as the handler for binaries in multiple platforms + $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + push: .push-$(ARCH) .push-$(ARCH): .container-$(ARCH) $(DOCKER) push $(MULTI_ARCH_IMG):$(TAG) diff --git a/images/nginx/README.md b/images/nginx/README.md index 6698b6ad06..ac536aec43 100644 --- a/images/nginx/README.md +++ b/images/nginx/README.md @@ -1,35 +1,32 @@ - -nginx 1.13.x base image using [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base) +nginx 1.15.x base image using [debian-base](quay.io/kubernetes-ingress-controller/debian-base-amd64) nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP proxy server. 
This custom nginx image contains: -- [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams -- nginx stats [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) -- [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) - [ngx_devel_kit](https://github.com/simpl/ngx_devel_kit) - [set-misc-nginx-module](https://github.com/openresty/set-misc-nginx-module) - [headers-more-nginx-module](https://github.com/openresty/headers-more-nginx-module) -- [nginx-sticky-module-ng](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng) - [nginx-http-auth-digest](https://github.com/atomx/nginx-http-auth-digest) - [ngx_http_substitutions_filter_module](https://github.com/yaoweibin/ngx_http_substitutions_filter_module) - [nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing) - [opentracing-cpp](https://github.com/opentracing/opentracing-cpp) - [zipkin-cpp-opentracing](https://github.com/rnburn/zipkin-cpp-opentracing) +- [dd-opentracing-cpp](https://github.com/DataDog/dd-opentracing-cpp) - [ModSecurity-nginx](https://github.com/SpiderLabs/ModSecurity-nginx) (only supported in x86_64) -- [brotli](https://github.com/google/brotli) (not supported in s390x) +- [brotli](https://github.com/google/brotli) +- [geoip2](https://github.com/leev/ngx_http_geoip2_module) **How to use this image:** -This image does provides a default configuration file with no backend servers. +This image provides a default configuration file with no backend servers. -*Using docker* +_Using docker_ ```console -docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro quay.io/kubernetes-ingress-controller/nginx:0.30 +docker run -v /some/nginx.conf:/etc/nginx/nginx.conf:ro quay.io/kubernetes-ingress-controller/nginx:0.84 ``` -*Creating a replication controller* +_Creating a replication controller_ ```console kubectl create -f ./rc.yaml diff --git a/images/nginx/build.sh b/images/nginx/build.sh deleted file mode 100755 index 8b00250ae0..0000000000 --- a/images/nginx/build.sh +++ /dev/null @@ -1,344 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -set -o errexit -set -o nounset -set -o pipefail - -export NGINX_VERSION=1.13.7 -export NDK_VERSION=0.3.0 -export VTS_VERSION=0.1.15 -export SETMISC_VERSION=0.31 -export STICKY_SESSIONS_VERSION=08a395c66e42 -export MORE_HEADERS_VERSION=0.33 -export NGINX_DIGEST_AUTH=519dc2a4907bc6d9c48f95b3cf6a0151aaf44b40 -export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b -export NGINX_OPENTRACING_VERSION=0.1.1 -export OPENTRACING_CPP_VERSION=1.0.0 -export ZIPKIN_CPP_VERSION=0.1.0 -export MODSECURITY=a2a5858d249222938c2f5e48087a922c63d7f9d8 - -export BUILD_PATH=/tmp/build - -ARCH=$(uname -m) - -get_src() -{ - hash="$1" - url="$2" - f=$(basename "$url") - - curl -sSL "$url" -o "$f" - echo "$hash $f" | sha256sum -c - || exit 10 - tar xzf "$f" - rm -rf "$f" -} - -if [[ ${ARCH} == "ppc64le" ]]; then - clean-install software-properties-common -fi - -# install required packages to build -clean-install \ - bash \ - build-essential \ - curl ca-certificates \ - libgeoip1 \ - libgeoip-dev \ - patch \ - libpcre3 \ - libpcre3-dev \ - libssl-dev \ - zlib1g \ - zlib1g-dev \ - libaio1 \ - libaio-dev \ - openssl \ - libperl-dev \ - cmake \ - util-linux \ - wget \ - libcurl4-openssl-dev \ - procps \ - git g++ pkgconf flex bison doxygen libyajl-dev liblmdb-dev libgeoip-dev libtool dh-autoreconf libxml2 libpcre++-dev libxml2-dev \ - || exit 1 - -mkdir -p /etc/nginx - -# download GeoIP databases -wget -O /etc/nginx/GeoIP.dat.gz https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz || { echo 'Could not download GeoLiteCountry, exiting.' ; exit 1; } -wget -O /etc/nginx/GeoLiteCity.dat.gz https://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz || { echo 'Could not download GeoLiteCity, exiting.' ; exit 1; } - -gunzip /etc/nginx/GeoIP.dat.gz -gunzip /etc/nginx/GeoLiteCity.dat.gz - -mkdir --verbose -p "$BUILD_PATH" -cd "$BUILD_PATH" - -# download, verify and extract the source files -get_src beb732bc7da80948c43fd0bf94940a21a21b1c1ddfba0bd99a4b88e026220f5c \ - "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" - -get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ - "https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz" - -get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \ - "https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" - -get_src 5112a054b1b1edb4c0042a9a840ef45f22abb3c05c68174e28ebf483164fb7e1 \ - "https://github.com/vozlt/nginx-module-vts/archive/v$VTS_VERSION.tar.gz" - -get_src a3dcbab117a9c103bc1ea5200fc00a7b7d2af97ff7fd525f16f8ac2632e30fbf \ - "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" - -get_src 53e440737ed1aff1f09fae150219a45f16add0c8d6e84546cb7d80f73ebffd90 \ - "https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/get/$STICKY_SESSIONS_VERSION.tar.gz" - -get_src 5ea7093cedea1a17b3c890015f83fc72346627400a9e6e3cec9c4b21297ecb61 \ - "https://github.com/atomx/nginx-http-auth-digest/archive/$NGINX_DIGEST_AUTH.tar.gz" - -get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \ - "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" - -get_src 8c55a7ec639b186689a0053eed5f1605f38c461f0d3ee6e728bb466a148a44d2 \ - "https://github.com/opentracing-contrib/nginx-opentracing/archive/v$NGINX_OPENTRACING_VERSION.tar.gz" - -get_src 9543f66790ba65810869a29b3aaef5286f1c446cb498a304d0d8b153c289cae8 
\ - "https://github.com/opentracing/opentracing-cpp/archive/v$OPENTRACING_CPP_VERSION.tar.gz" - -get_src 3d91e866819986f5dda00549694c4eaca16f1209d1f87aecc8aca3b42aa72e08 \ - "https://github.com/rnburn/zipkin-cpp-opentracing/archive/v$ZIPKIN_CPP_VERSION.tar.gz" - -get_src 3abdecedb5bf544eeba8c1ce0bef2da6a9f064b216ebbe20b68894afec1d7d80 \ - "https://github.com/SpiderLabs/ModSecurity-nginx/archive/$MODSECURITY.tar.gz" - -#https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/ -curl -sSL -o nginx__dynamic_tls_records.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx__1.11.5_dynamic_tls_records.patch - -# build opentracing lib -cd "$BUILD_PATH/opentracing-cpp-$OPENTRACING_CPP_VERSION" -mkdir .build -cd .build -cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=OFF .. -make -make install - -# build zipkin lib -cd "$BUILD_PATH/zipkin-cpp-opentracing-$ZIPKIN_CPP_VERSION" -mkdir .build -cd .build -cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=1 -DBUILD_TESTING=OFF .. -make -make install - -cd "$BUILD_PATH" - -if [[ ${ARCH} != "s390x" ]]; then - # Get Brotli source and deps - git clone --depth=1 https://github.com/google/ngx_brotli.git - cd ngx_brotli && git submodule update --init -fi - -if [[ ${ARCH} == "x86_64" ]]; then - # build modsecurity library - cd "$BUILD_PATH" - git clone https://github.com/SpiderLabs/ModSecurity - cd ModSecurity/ - git checkout -b v3/master origin/v3/master - sh build.sh - git submodule init - git submodule update - autoreconf -i - automake - autoconf - ./configure - make - make install -fi - -# build nginx -cd "$BUILD_PATH/nginx-$NGINX_VERSION" - -echo "Applying nginx patches..." -patch -p1 < $BUILD_PATH/nginx__dynamic_tls_records.patch - -WITH_FLAGS="--with-debug \ - --with-pcre-jit \ - --with-http_ssl_module \ - --with-http_stub_status_module \ - --with-http_realip_module \ - --with-http_auth_request_module \ - --with-http_addition_module \ - --with-http_dav_module \ - --with-http_geoip_module \ - --with-http_gzip_static_module \ - --with-http_sub_module \ - --with-http_v2_module \ - --with-stream \ - --with-stream_ssl_module \ - --with-stream_ssl_preread_module \ - --with-threads" - -if [[ ${ARCH} != "armv7l" || ${ARCH} != "aarch64" ]]; then - WITH_FLAGS+=" --with-file-aio" -fi - -CC_OPT='-g -O3 -flto -fPIE -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 --param=ssp-buffer-size=4 -DTCP_FASTOPEN=23 -Wno-error=strict-aliasing -fPIC' -LD_OPT='-Wl,-Bsymbolic-functions -fPIE -fPIC -pie -Wl,-z,relro -Wl,-z,now' - -if [[ ${ARCH} == "x86_64" ]]; then - CC_OPT+=' -m64 -mtune=generic' -fi - -WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ - --add-module=$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION \ - --add-module=$BUILD_PATH/nginx-module-vts-$VTS_VERSION \ - --add-module=$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION \ - --add-module=$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION \ - --add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \ - --add-module=$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS \ - --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/opentracing \ - --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/zipkin" - -if [[ ${ARCH} == "x86_64" ]]; then - WITH_MODULES+=" --add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY" -fi - -if [[ ${ARCH} != "s390x" ]]; then - 
WITH_MODULES+=" --add-module=$BUILD_PATH/ngx_brotli" -fi - -./configure \ - --prefix=/usr/share/nginx \ - --conf-path=/etc/nginx/nginx.conf \ - --modules-path=/etc/nginx/modules \ - --http-log-path=/var/log/nginx/access.log \ - --error-log-path=/var/log/nginx/error.log \ - --lock-path=/var/lock/nginx.lock \ - --pid-path=/run/nginx.pid \ - --http-client-body-temp-path=/var/lib/nginx/body \ - --http-fastcgi-temp-path=/var/lib/nginx/fastcgi \ - --http-proxy-temp-path=/var/lib/nginx/proxy \ - --http-scgi-temp-path=/var/lib/nginx/scgi \ - --http-uwsgi-temp-path=/var/lib/nginx/uwsgi \ - ${WITH_FLAGS} \ - --without-mail_pop3_module \ - --without-mail_smtp_module \ - --without-mail_imap_module \ - --without-http_uwsgi_module \ - --without-http_scgi_module \ - --with-cc-opt="${CC_OPT}" \ - --with-ld-opt="${LD_OPT}" \ - ${WITH_MODULES} \ - && make || exit 1 \ - && make install || exit 1 - -echo "Cleaning..." - -cd / - -apt-mark unmarkauto \ - bash \ - curl ca-certificates \ - libgeoip1 \ - libpcre3 \ - zlib1g \ - libaio1 \ - xz-utils \ - geoip-bin \ - libyajl2 liblmdb0 libxml2 libpcre++ \ - gzip \ - openssl - -apt-get remove -y --purge \ - build-essential \ - gcc-6 \ - cpp-6 \ - libgeoip-dev \ - libpcre3-dev \ - libssl-dev \ - zlib1g-dev \ - libaio-dev \ - linux-libc-dev \ - cmake \ - wget \ - git g++ pkgconf flex bison doxygen libyajl-dev liblmdb-dev libgeoip-dev libtool dh-autoreconf libpcre++-dev libxml2-dev - -apt-get autoremove -y - -mkdir -p /var/lib/nginx/body /usr/share/nginx/html - -mv /usr/share/nginx/sbin/nginx /usr/sbin - -rm -rf "$BUILD_PATH" -rm -Rf /usr/share/man /usr/share/doc -rm -rf /tmp/* /var/tmp/* -rm -rf /var/lib/apt/lists/* -rm -rf /var/cache/apt/archives/* -rm -rf /usr/local/modsecurity/bin -rm -rf /usr/local/modsecurity/include -rm -rf /usr/local/modsecurity/lib/libmodsecurity.a - -# Download owasp modsecurity crs -cd /etc/nginx/ -curl -sSL -o master.tgz https://github.com/SpiderLabs/owasp-modsecurity-crs/archive/v3.0.2/master.tar.gz -tar zxpvf master.tgz -mv owasp-modsecurity-crs-3.0.2/ owasp-modsecurity-crs -rm master.tgz - -cd owasp-modsecurity-crs -mv crs-setup.conf.example crs-setup.conf -mv rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf.example rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf -mv rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf.example rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf -cd .. 
- -# Download modsecurity.conf -mkdir modsecurity -cd modsecurity -curl -sSL -o modsecurity.conf https://raw.githubusercontent.com/SpiderLabs/ModSecurity/v3/master/modsecurity.conf-recommended - -# OWASP CRS v3 rules -echo " -Include /etc/nginx/owasp-modsecurity-crs/crs-setup.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-901-INITIALIZATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-905-COMMON-EXCEPTIONS.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-910-IP-REPUTATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-911-METHOD-ENFORCEMENT.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-912-DOS-PROTECTION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-913-SCANNER-DETECTION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-921-PROTOCOL-ATTACK.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-931-APPLICATION-ATTACK-RFI.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-949-BLOCKING-EVALUATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-950-DATA-LEAKAGES.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-951-DATA-LEAKAGES-SQL.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-952-DATA-LEAKAGES-JAVA.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-953-DATA-LEAKAGES-PHP.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-954-DATA-LEAKAGES-IIS.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-959-BLOCKING-EVALUATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-980-CORRELATION.conf -Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf -" > /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf diff --git a/images/nginx/rc.yaml b/images/nginx/rc.yaml index 51ae7dcc4a..965b7a6a6a 100644 --- a/images/nginx/rc.yaml +++ b/images/nginx/rc.yaml @@ -3,35 +3,42 @@ kind: Service metadata: name: nginxsvc labels: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: type: NodePort ports: - - port: 80 - protocol: TCP - name: http - - port: 443 - protocol: TCP - name: https + - port: 80 + protocol: TCP + name: http + - port: 443 + protocol: TCP + name: https selector: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx --- apiVersion: v1 kind: ReplicationController metadata: name: nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: replicas: 1 selector: - app: nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: 
ingress-nginx template: metadata: labels: - app: nginx - name: frontend + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx spec: containers: - - name: nginx - image: quay.io/kubernetes-ingress-controller/nginx:0.30 - ports: - - containerPort: 80 + - name: nginx + image: quay.io/kubernetes-ingress-controller/nginx:0.84 + ports: + - containerPort: 80 + - containerPort: 443 diff --git a/images/nginx/rootfs/Dockerfile b/images/nginx/rootfs/Dockerfile new file mode 100644 index 0000000000..6faaa73571 --- /dev/null +++ b/images/nginx/rootfs/Dockerfile @@ -0,0 +1,33 @@ +# Copyright 2015 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +FROM BASEIMAGE + +CROSS_BUILD_COPY qemu-ARCH-static /usr/bin/ + +COPY . / + +RUN clean-install bash + +RUN /build.sh + +# Create symlinks to redirect nginx logs to stdout and stderr docker log collector +# This only works if nginx is started with CMD or ENTRYPOINT +RUN ln -sf /dev/stdout /var/log/nginx/access.log +RUN ln -sf /dev/stderr /var/log/nginx/error.log + +EXPOSE 80 443 + +CMD ["nginx", "-g", "daemon off;"] diff --git a/images/nginx/rootfs/build.sh b/images/nginx/rootfs/build.sh new file mode 100755 index 0000000000..2b80d68834 --- /dev/null +++ b/images/nginx/rootfs/build.sh @@ -0,0 +1,626 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
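+
+# Executed once at image-build time (rootfs/Dockerfile does COPY . / and then
+# RUN /build.sh); compiles nginx together with the third-party modules below.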
+ + +set -o errexit +set -o nounset +set -o pipefail + +export NGINX_VERSION=1.17.0 +export NDK_VERSION=0.3.1rc1 +export SETMISC_VERSION=0.32 +export MORE_HEADERS_VERSION=0.33 +export NGINX_DIGEST_AUTH=cd8641886c873cf543255aeda20d23e4cd603d05 +export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b +export NGINX_OPENTRACING_VERSION=0.8.0 +export OPENTRACING_CPP_VERSION=1.5.1 +export ZIPKIN_CPP_VERSION=0.5.2 +export JAEGER_VERSION=cdfaf5bb25ff5f8ec179fd548e6c7c2ade9a6a09 +export MSGPACK_VERSION=3.1.1 +export DATADOG_CPP_VERSION=0.4.2 +export MODSECURITY_VERSION=d7101e13685efd7e7c9f808871b202656a969f4b +export MODSECURITY_LIB_VERSION=3.0.3 +export OWASP_MODSECURITY_CRS_VERSION=3.1.0 +export LUA_BRIDGE_TRACER_VERSION=0.1.0 +export LUA_NGX_VERSION=0.10.15 +export LUA_STREAM_NGX_VERSION=0.0.7 +export LUA_UPSTREAM_VERSION=0.07 +export NGINX_INFLUXDB_VERSION=5b09391cb7b9a889687c0aa67964c06a2d933e8b +export GEOIP2_VERSION=3.2 +export NGINX_AJP_VERSION=bf6cd93f2098b59260de8d494f0f4b1f11a84627 +export LUAJIT_VERSION=2.1-20190507 + +export BUILD_PATH=/tmp/build + +ARCH=$(uname -m) + +get_src() +{ + hash="$1" + url="$2" + f=$(basename "$url") + + echo "Downloading $url" + + curl -sSL "$url" -o "$f" + echo "$hash $f" | sha256sum -c - || exit 10 + tar xzf "$f" + rm -rf "$f" +} + +apt-get update && apt-get dist-upgrade -y + +# install required packages to build +clean-install \ + bash \ + build-essential \ + curl ca-certificates \ + libgeoip1 \ + libgeoip-dev \ + patch \ + libpcre3 \ + libpcre3-dev \ + libssl-dev \ + zlib1g \ + zlib1g-dev \ + libaio1 \ + libaio-dev \ + openssl \ + libperl-dev \ + cmake \ + util-linux \ + lua5.1 liblua5.1-0 liblua5.1-dev \ + lmdb-utils \ + wget \ + libcurl4-openssl-dev \ + libprotobuf-dev protobuf-compiler \ + libz-dev \ + procps \ + git g++ pkgconf flex bison doxygen libyajl-dev liblmdb-dev libtool dh-autoreconf libxml2 libpcre++-dev libxml2-dev \ + lua-cjson \ + python \ + luarocks \ + libmaxminddb-dev \ + dumb-init \ + gdb \ + bc \ + || exit 1 + +if [[ ${ARCH} == "x86_64" ]]; then + ln -s /usr/lib/x86_64-linux-gnu/liblua5.1.so /usr/lib/liblua.so + ln -s /usr/lib/x86_64-linux-gnu /usr/lib/lua-platform-path +fi + +if [[ ${ARCH} == "aarch64" ]]; then + ln -s /usr/lib/aarch64-linux-gnu/liblua5.1.so /usr/lib/liblua.so + ln -s /usr/lib/aarch64-linux-gnu /usr/lib/lua-platform-path +fi + +mkdir -p /etc/nginx + +# Get the GeoIP data +GEOIP_FOLDER=/etc/nginx/geoip +mkdir -p $GEOIP_FOLDER + +function geoip2_get { + wget -O $GEOIP_FOLDER/$1.tar.gz $2 || { echo "Could not download $1, exiting." 
; exit 1; } + mkdir $GEOIP_FOLDER/$1 \ + && tar xf $GEOIP_FOLDER/$1.tar.gz -C $GEOIP_FOLDER/$1 --strip-components 1 \ + && mv $GEOIP_FOLDER/$1/$1.mmdb $GEOIP_FOLDER/$1.mmdb \ + && rm -rf $GEOIP_FOLDER/$1 \ + && rm -rf $GEOIP_FOLDER/$1.tar.gz +} + +geoip2_get "GeoLite2-City" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz" +geoip2_get "GeoLite2-ASN" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz" + +mkdir --verbose -p "$BUILD_PATH" +cd "$BUILD_PATH" + +# download, verify and extract the source files +get_src e21b5d06cd53e86afb94f0b3678e0abb0c0f011433471fa3d895cefa65ae0fab \ + "https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" + +get_src 49f50d4cd62b166bc1aaf712febec5e028d9f187cedbc27a610dfd01bdde2d36 \ + "https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz" + +get_src f1ad2459c4ee6a61771aa84f77871f4bfe42943a4aa4c30c62ba3f981f52c201 \ + "https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" + +get_src a3dcbab117a9c103bc1ea5200fc00a7b7d2af97ff7fd525f16f8ac2632e30fbf \ + "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" + +get_src fe683831f832aae4737de1e1026a4454017c2d5f98cb88b08c5411dc380062f8 \ + "https://github.com/atomx/nginx-http-auth-digest/archive/$NGINX_DIGEST_AUTH.tar.gz" + +get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \ + "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" + +get_src b2159297814d5df153cf45f355bcd8ffdb71f2468e8149ad549d4f9c0cdc81ad \ + "https://github.com/opentracing-contrib/nginx-opentracing/archive/v$NGINX_OPENTRACING_VERSION.tar.gz" + +get_src 015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301 \ + "https://github.com/opentracing/opentracing-cpp/archive/v$OPENTRACING_CPP_VERSION.tar.gz" + +get_src 30affaf0f3a84193f7127cc0135da91773ce45d902414082273dae78914f73df \ + "https://github.com/rnburn/zipkin-cpp-opentracing/archive/v$ZIPKIN_CPP_VERSION.tar.gz" + +get_src 5c8d25e68fb852f61489b669aebb7bd8ca8c88ebb5e5f969212fcceff3ee2d0b \ + "https://github.com/SpiderLabs/ModSecurity-nginx/archive/$MODSECURITY_VERSION.tar.gz" + +get_src 3183450d897baa9309347c8617edc0c97c5b29ffc32bd2d12f498edf2dcbeffa \ + "https://github.com/jaegertracing/jaeger-client-cpp/archive/$JAEGER_VERSION.tar.gz" + +get_src bda49f996a73d2c6080ff0523e7b535917cd28c8a79c3a5da54fc29332d61d1e \ + "https://github.com/msgpack/msgpack-c/archive/cpp-$MSGPACK_VERSION.tar.gz" + +get_src a3d1c03e7af570fa64c01df259e6e9bb78637a6bd9c65c6bf7e8703e466dc22f \ + "https://github.com/DataDog/dd-opentracing-cpp/archive/v$DATADOG_CPP_VERSION.tar.gz" + +get_src c29183001e3ab48299deecd02fb84b799b6627817c9baa66e4b342ac81dd6b40\ + "https://github.com/opentracing/lua-bridge-tracer/archive/v$LUA_BRIDGE_TRACER_VERSION.tar.gz" + +get_src 7d5f3439c8df56046d0564b5857fd8a30296ab1bd6df0f048aed7afb56a0a4c2 \ + "https://github.com/openresty/lua-nginx-module/archive/v$LUA_NGX_VERSION.tar.gz" + +get_src 99c47c75c159795c9faf76bbb9fa58e5a50b75286c86565ffcec8514b1c74bf9 \ + "https://github.com/openresty/stream-lua-nginx-module/archive/v$LUA_STREAM_NGX_VERSION.tar.gz" + +get_src 2a69815e4ae01aa8b170941a8e1a10b6f6a9aab699dee485d58f021dd933829a \ + "https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz" + +get_src 
42f0384f80b6a9b4f42f91ee688baf69165d0573347e6ea84ebed95e928211d7 \ + "https://github.com/openresty/lua-resty-lrucache/archive/v0.09.tar.gz" + +get_src 8f5f76d2689a3f6b0782f0a009c56a65e4c7a4382be86422c9b3549fe95b0dc4 \ + "https://github.com/openresty/lua-resty-core/archive/v0.1.17.tar.gz" + +get_src 517db9add320250b770f2daac83a49e38e6131611f2daa5ff05c69d5705e9746 \ + "https://github.com/openresty/lua-resty-lock/archive/v0.08rc1.tar.gz" + +get_src 3917d506e2d692088f7b4035c589cc32634de4ea66e40fc51259fbae43c9258d \ + "https://github.com/hamishforbes/lua-resty-iputils/archive/v0.3.0.tar.gz" + +get_src 5d16e623d17d4f42cc64ea9cfb69ca960d313e12f5d828f785dd227cc483fcbd \ + "https://github.com/openresty/lua-resty-upload/archive/v0.10.tar.gz" + +get_src 4aca34f324d543754968359672dcf5f856234574ee4da360ce02c778d244572a \ + "https://github.com/openresty/lua-resty-dns/archive/v0.21.tar.gz" + +get_src 095615fe94e64615c4a27f4f4475b91c047cf8d10bc2dbde8d5ba6aa625fc5ab \ + "https://github.com/openresty/lua-resty-string/archive/v0.11.tar.gz" + +get_src 89cedd6466801bfef20499689ebb34ecf17a2e60a34cd06e13c0204ea1775588 \ + "https://github.com/openresty/lua-resty-balancer/archive/v0.02rc5.tar.gz" + +get_src d81b33129c6fb5203b571fa4d8394823bf473d8872c0357a1d0f14420b1483bd \ + "https://github.com/cloudflare/lua-resty-cookie/archive/v0.1.0.tar.gz" + +get_src 9b5294fb2ecb76f7e7cb12169a29b75b6a9ead2d639095e903c8db1c7d95bd3a \ + "https://github.com/openresty/luajit2/archive/v$LUAJIT_VERSION.tar.gz" + +get_src 1af5a5632dc8b00ae103d51b7bf225de3a7f0df82f5c6a401996c080106e600e \ + "https://github.com/influxdata/nginx-influxdb-module/archive/$NGINX_INFLUXDB_VERSION.tar.gz" + +get_src 15bd1005228cf2c869a6f09e8c41a6aaa6846e4936c473106786ae8ac860fab7 \ + "https://github.com/leev/ngx_http_geoip2_module/archive/$GEOIP2_VERSION.tar.gz" + +get_src 5f629a50ba22347c441421091da70fdc2ac14586619934534e5a0f8a1390a950 \ + "https://github.com/yaoweibin/nginx_ajp_module/archive/$NGINX_AJP_VERSION.tar.gz" + +# improve compilation times +CORES=$(($(grep -c ^processor /proc/cpuinfo) - 0)) + +export MAKEFLAGS=-j${CORES} +export CTEST_BUILD_FLAGS=${MAKEFLAGS} +export HUNTER_JOBS_NUMBER=${CORES} +export HUNTER_KEEP_PACKAGE_SOURCES=false +export HUNTER_USE_CACHE_SERVERS=true + +# Install luajit from openresty fork +export LUAJIT_LIB=/usr/local/lib +export LUA_LIB_DIR="$LUAJIT_LIB/lua" + +cd "$BUILD_PATH/luajit2-$LUAJIT_VERSION" +make CCDEBUG=-g +make install + +export LUAJIT_INC=/usr/local/include/luajit-2.1 + +# Installing luarocks packages +if [[ ${ARCH} == "x86_64" ]]; then + export PCRE_DIR=/usr/lib/x86_64-linux-gnu +fi + +if [[ ${ARCH} == "aarch64" ]]; then + export PCRE_DIR=/usr/lib/aarch64-linux-gnu +fi + +cd "$BUILD_PATH" +luarocks install lrexlib-pcre 2.7.2-1 PCRE_LIBDIR=${PCRE_DIR} + +cd "$BUILD_PATH/lua-resty-core-0.1.17" +make install + +cd "$BUILD_PATH/lua-resty-lrucache-0.09" +make install + +cd "$BUILD_PATH/lua-resty-lock-0.08rc1" +make install + +cd "$BUILD_PATH/lua-resty-iputils-0.3.0" +make install + +cd "$BUILD_PATH/lua-resty-upload-0.10" +make install + +cd "$BUILD_PATH/lua-resty-dns-0.21" +make install + +cd "$BUILD_PATH/lua-resty-string-0.11" +make install + +cd "$BUILD_PATH/lua-resty-balancer-0.02rc5" +make all +make install + +cd "$BUILD_PATH/lua-resty-cookie-0.1.0" +make install + +# build and install lua-resty-waf with dependencies +/install_lua_resty_waf.sh + +# install openresty-gdb-utils +cd / +git clone --depth=1 
https://github.com/openresty/openresty-gdb-utils.git +cat > ~/.gdbinit << EOF +directory /openresty-gdb-utils + +py import sys +py sys.path.append("/openresty-gdb-utils") + +source luajit20.gdb +source ngx-lua.gdb +source luajit21.py +source ngx-raw-req.py +set python print-stack full +EOF + +# build opentracing lib +cd "$BUILD_PATH/opentracing-cpp-$OPENTRACING_CPP_VERSION" +mkdir .build +cd .build + +cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS="-fPIC" \ + -DBUILD_TESTING=OFF \ + -DBUILD_MOCKTRACER=OFF \ + .. + +make +make install + +# build jaeger lib +cd "$BUILD_PATH/jaeger-client-cpp-$JAEGER_VERSION" +sed -i 's/-Werror/-Wno-psabi/' CMakeLists.txt + +cat <<EOF > export.map +{ + global: + OpenTracingMakeTracerFactory; + local: *; +}; +EOF + +mkdir .build +cd .build + +cmake -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_TESTING=OFF \ + -DJAEGERTRACING_BUILD_EXAMPLES=OFF \ + -DJAEGERTRACING_BUILD_CROSSDOCK=OFF \ + -DJAEGERTRACING_COVERAGE=OFF \ + -DJAEGERTRACING_PLUGIN=ON \ + -DHUNTER_CONFIGURATION_TYPES=Release \ + -DJAEGERTRACING_WITH_YAML_CPP=ON .. + +make +make install + +export HUNTER_INSTALL_DIR=$(cat _3rdParty/Hunter/install-root-dir) + +mv libjaegertracing_plugin.so /usr/local/lib/libjaegertracing_plugin.so + +# build zipkin lib +cd "$BUILD_PATH/zipkin-cpp-opentracing-$ZIPKIN_CPP_VERSION" + +cat <<EOF > export.map +{ + global: + OpenTracingMakeTracerFactory; + local: *; +}; +EOF + +mkdir .build +cd .build + +cmake -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_PLUGIN=ON \ + -DBUILD_TESTING=OFF .. + +make +make install + +# build msgpack lib +cd "$BUILD_PATH/msgpack-c-cpp-$MSGPACK_VERSION" + +mkdir .build +cd .build +cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS="-fPIC" \ + -DBUILD_SHARED_LIBS=OFF \ + -DBUILD_TESTING=OFF \ + -DBUILD_MOCKTRACER=OFF \ + .. + +make +make install + +# build datadog lib +cd "$BUILD_PATH/dd-opentracing-cpp-$DATADOG_CPP_VERSION" + +mkdir .build +cd .build +cmake .. + +make +make install + +# build Lua bridge tracer +cd "$BUILD_PATH/lua-bridge-tracer-$LUA_BRIDGE_TRACER_VERSION" +mkdir .build +cd .build +cmake .. +make +make install + +# Get Brotli source and deps +cd "$BUILD_PATH" +git clone --depth=1 https://github.com/google/ngx_brotli.git +cd ngx_brotli +git submodule init +git submodule update + +# build modsecurity library +cd "$BUILD_PATH" +git clone -b v$MODSECURITY_LIB_VERSION https://github.com/SpiderLabs/ModSecurity +cd ModSecurity/ +git submodule init +git submodule update +sh build.sh +./configure --disable-doxygen-doc --disable-examples --disable-dependency-tracking +make +make install + +mkdir -p /etc/nginx/modsecurity +cp modsecurity.conf-recommended /etc/nginx/modsecurity/modsecurity.conf +cp unicode.mapping /etc/nginx/modsecurity/unicode.mapping + +# Download owasp modsecurity crs +cd /etc/nginx/ +git clone -b v$OWASP_MODSECURITY_CRS_VERSION https://github.com/SpiderLabs/owasp-modsecurity-crs +cd owasp-modsecurity-crs + +mv crs-setup.conf.example crs-setup.conf +mv rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf.example rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf +mv rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf.example rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf +cd ..
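+
+# The echo block below writes nginx-modsecurity.conf, a single include list
+# chaining the CRS setup and rule files; the consuming nginx.conf is expected
+# to load that file (typically via the ModSecurity-nginx
+# modsecurity_rules_file directive -- an assumption about configuration that
+# lives outside this script).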
+ +# OWASP CRS v3 rules +echo " +Include /etc/nginx/owasp-modsecurity-crs/crs-setup.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-901-INITIALIZATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-905-COMMON-EXCEPTIONS.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-910-IP-REPUTATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-911-METHOD-ENFORCEMENT.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-912-DOS-PROTECTION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-913-SCANNER-DETECTION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-921-PROTOCOL-ATTACK.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-931-APPLICATION-ATTACK-RFI.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-949-BLOCKING-EVALUATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-950-DATA-LEAKAGES.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-951-DATA-LEAKAGES-SQL.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-952-DATA-LEAKAGES-JAVA.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-953-DATA-LEAKAGES-PHP.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-954-DATA-LEAKAGES-IIS.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-959-BLOCKING-EVALUATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-980-CORRELATION.conf +Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-999-EXCLUSION-RULES-AFTER-CRS.conf +" > /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf + +# build nginx +cd "$BUILD_PATH/nginx-$NGINX_VERSION" + +# apply Nginx patches +patch -p1 < /patches/openresty-ssl_cert_cb_yield.patch + +WITH_FLAGS="--with-debug \ + --with-compat \ + --with-pcre-jit \ + --with-http_ssl_module \ + --with-http_stub_status_module \ + --with-http_realip_module \ + --with-http_auth_request_module \ + --with-http_addition_module \ + --with-http_dav_module \ + --with-http_geoip_module \ + --with-http_gzip_static_module \ + --with-http_sub_module \ + --with-http_v2_module \ + --with-stream \ + --with-stream_ssl_module \ + --with-stream_ssl_preread_module \ + --with-threads \ + --with-http_secure_link_module \ + --with-http_gunzip_module" + +if [[ ${ARCH} != "aarch64" ]]; then + WITH_FLAGS+=" --with-file-aio" +fi + +# "Combining -flto with -g is currently experimental and expected to produce unexpected results." 
+# https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html +CC_OPT="-g -Og -fPIE -fstack-protector-strong \ + -Wformat \ + -Werror=format-security \ + -Wno-deprecated-declarations \ + -fno-strict-aliasing \ + -D_FORTIFY_SOURCE=2 \ + --param=ssp-buffer-size=4 \ + -DTCP_FASTOPEN=23 \ + -fPIC \ + -I$HUNTER_INSTALL_DIR/include \ + -Wno-cast-function-type" + +LD_OPT="-fPIE -fPIC -pie -Wl,-z,relro -Wl,-z,now -L$HUNTER_INSTALL_DIR/lib" + +if [[ ${ARCH} == "x86_64" ]]; then + CC_OPT+=' -m64 -mtune=native' +fi + +WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ + --add-module=$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION \ + --add-module=$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION \ + --add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \ + --add-module=$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS \ + --add-module=$BUILD_PATH/lua-nginx-module-$LUA_NGX_VERSION \ + --add-module=$BUILD_PATH/stream-lua-nginx-module-$LUA_STREAM_NGX_VERSION \ + --add-module=$BUILD_PATH/lua-upstream-nginx-module-$LUA_UPSTREAM_VERSION \ + --add-module=$BUILD_PATH/nginx-influxdb-module-$NGINX_INFLUXDB_VERSION \ + --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/opentracing \ + --add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY_VERSION \ + --add-dynamic-module=$BUILD_PATH/ngx_http_geoip2_module-${GEOIP2_VERSION} \ + --add-module=$BUILD_PATH/nginx_ajp_module-${NGINX_AJP_VERSION} \ + --add-module=$BUILD_PATH/ngx_brotli" + +./configure \ + --prefix=/usr/share/nginx \ + --conf-path=/etc/nginx/nginx.conf \ + --modules-path=/etc/nginx/modules \ + --http-log-path=/var/log/nginx/access.log \ + --error-log-path=/var/log/nginx/error.log \ + --lock-path=/var/lock/nginx.lock \ + --pid-path=/run/nginx.pid \ + --http-client-body-temp-path=/var/lib/nginx/body \ + --http-fastcgi-temp-path=/var/lib/nginx/fastcgi \ + --http-proxy-temp-path=/var/lib/nginx/proxy \ + --http-scgi-temp-path=/var/lib/nginx/scgi \ + --http-uwsgi-temp-path=/var/lib/nginx/uwsgi \ + ${WITH_FLAGS} \ + --without-mail_pop3_module \ + --without-mail_smtp_module \ + --without-mail_imap_module \ + --without-http_uwsgi_module \ + --without-http_scgi_module \ + --with-cc-opt="${CC_OPT}" \ + --with-ld-opt="${LD_OPT}" \ + --user=www-data \ + --group=www-data \ + ${WITH_MODULES} + +make || exit 1 +make install || exit 1 + +echo "Cleaning..." 
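+
+# Everything below only slims the final image: runtime libraries are pinned
+# with apt-mark unmarkauto so apt-get autoremove keeps them, while build-only
+# toolchains, sources and caches are purged.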
+ +cd / + +mv /usr/share/nginx/sbin/nginx /usr/sbin + +apt-mark unmarkauto \ + bash \ + curl ca-certificates \ + libgeoip1 \ + libpcre3 \ + zlib1g \ + libaio1 \ + gdb \ + geoip-bin \ + libyajl2 liblmdb0 libxml2 libpcre++ \ + gzip \ + openssl + +apt-get remove -y --purge \ + build-essential \ + libgeoip-dev \ + libpcre3-dev \ + libssl-dev \ + zlib1g-dev \ + libaio-dev \ + linux-libc-dev \ + cmake \ + wget \ + patch \ + protobuf-compiler \ + python \ + xz-utils \ + bc \ + git g++ pkgconf flex bison doxygen libyajl-dev liblmdb-dev libgeoip-dev libtool dh-autoreconf libpcre++-dev libxml2-dev + +apt-get autoremove -y + +rm -rf "$BUILD_PATH" +rm -Rf /usr/share/man /usr/share/doc +rm -rf /tmp/* /var/tmp/* +rm -rf /var/lib/apt/lists/* +rm -rf /var/cache/apt/archives/* +rm -rf /usr/local/modsecurity/bin +rm -rf /usr/local/modsecurity/include +rm -rf /usr/local/modsecurity/lib/libmodsecurity.a + +rm -rf /root/.cache + +rm -rf /etc/nginx/owasp-modsecurity-crs/.git +rm -rf /etc/nginx/owasp-modsecurity-crs/util/regression-tests + +rm -rf $HOME/.hunter + +# update image permissions +writeDirs=( \ + /etc/nginx \ + /var/lib/nginx \ + /var/log/nginx \ + /opt/modsecurity/var/log \ + /opt/modsecurity/var/upload \ + /opt/modsecurity/var/audit \ +); + +for dir in "${writeDirs[@]}"; do + mkdir -p ${dir}; + chown -R www-data.www-data ${dir}; +done diff --git a/images/nginx/rootfs/etc/nginx/geoip/GeoIP.dat b/images/nginx/rootfs/etc/nginx/geoip/GeoIP.dat new file mode 100644 index 0000000000..be8b031f7d Binary files /dev/null and b/images/nginx/rootfs/etc/nginx/geoip/GeoIP.dat differ diff --git a/images/nginx/rootfs/etc/nginx/geoip/GeoIPASNum.dat b/images/nginx/rootfs/etc/nginx/geoip/GeoIPASNum.dat new file mode 100644 index 0000000000..85c2cb3296 Binary files /dev/null and b/images/nginx/rootfs/etc/nginx/geoip/GeoIPASNum.dat differ diff --git a/images/nginx/rootfs/etc/nginx/geoip/GeoLiteCity.dat b/images/nginx/rootfs/etc/nginx/geoip/GeoLiteCity.dat new file mode 100644 index 0000000000..1adb8c3c49 Binary files /dev/null and b/images/nginx/rootfs/etc/nginx/geoip/GeoLiteCity.dat differ diff --git a/images/nginx/rootfs/install_lua_resty_waf.sh b/images/nginx/rootfs/install_lua_resty_waf.sh new file mode 100755 index 0000000000..760c3aacd7 --- /dev/null +++ b/images/nginx/rootfs/install_lua_resty_waf.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
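The body of the script follows: it builds p0pr0ck5/lua-resty-waf from source, then hand-installs the Lua libraries that the project's install-deps target would otherwise fetch via OPM (see the comment in the script). A quick check of the layout it leaves behind, sketched under the assumption that LUA_LIB_DIR is exported by the surrounding image build, as the script expects:

# the WAF itself, its storage backends, its rules, and the hand-installed deps
ls "$LUA_LIB_DIR/resty/waf" "$LUA_LIB_DIR/resty/waf/storage" "$LUA_LIB_DIR/rules" "$LUA_LIB_DIR/resty/logger"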
+
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+cd "$BUILD_PATH"
+git clone --recursive --single-branch -b v0.11.1 https://github.com/p0pr0ck5/lua-resty-waf
+
+cd lua-resty-waf
+
+ARCH=$(uname -m)
+if [[ ${ARCH} != "x86_64" ]]; then
+  # replace CFLAGS
+  sed -i 's/CFLAGS = -msse2 -msse3 -msse4.1 -O3/CFLAGS = -O3/' lua-aho-corasick/Makefile
+  # export the PCRE lib directory (the pattern is quoted so the shell does not expand it)
+  export PCRE_LIBDIR=$(find /usr/lib -name 'libpcre*.so*' | head -1 | xargs dirname)
+  luarocks install lrexlib-pcre 2.7.2-1 PCRE_LIBDIR=${PCRE_LIBDIR}
+fi
+
+curl -o 96b0a04ce62dd01b6c6c8a8c97df7ce9916d173e.patch -sSL https://github.com/p0pr0ck5/lua-resty-waf/commit/96b0a04ce62dd01b6c6c8a8c97df7ce9916d173e.patch
+patch -p1 < 96b0a04ce62dd01b6c6c8a8c97df7ce9916d173e.patch
+
+make
+make install-check
+
+# we cannot use "make install" directly here because it also calls "install-deps", which requires OPM;
+# to avoid that, we manually install the libraries that "install-deps" would install
+
+git clone -b master --single-branch https://github.com/cloudflare/lua-resty-cookie.git "$BUILD_PATH/lua-resty-cookie"
+cd "$BUILD_PATH/lua-resty-cookie"
+make install
+
+git clone -b master --single-branch https://github.com/p0pr0ck5/lua-ffi-libinjection.git "$BUILD_PATH/lua-ffi-libinjection"
+cd "$BUILD_PATH/lua-ffi-libinjection"
+install lib/resty/*.lua "$LUA_LIB_DIR/resty/"
+
+git clone -b master --single-branch https://github.com/cloudflare/lua-resty-logger-socket.git "$BUILD_PATH/lua-resty-logger-socket"
+cd "$BUILD_PATH/lua-resty-logger-socket"
+install -d "$LUA_LIB_DIR/resty/logger"
+install lib/resty/logger/*.lua "$LUA_LIB_DIR/resty/logger/"
+
+git clone -b master --single-branch https://github.com/bungle/lua-resty-random.git "$BUILD_PATH/lua-resty-random"
+cd "$BUILD_PATH/lua-resty-random"
+make install
+
+# and do the rest of what "make install" does
+cd "$BUILD_PATH/lua-resty-waf"
+install -d "$LUA_LIB_DIR/resty/waf/storage"
+install -d "$LUA_LIB_DIR/rules"
+install -m 644 lib/resty/*.lua "$LUA_LIB_DIR/resty/"
+install -m 644 lib/resty/waf/*.lua "$LUA_LIB_DIR/resty/waf/"
+install -m 644 lib/resty/waf/storage/*.lua "$LUA_LIB_DIR/resty/waf/storage/"
+install -m 644 lib/*.so "$LUA_LIB_DIR"
+install -m 644 rules/*.json "$LUA_LIB_DIR/rules/"
diff --git a/images/nginx/rootfs/patches/openresty-ssl_cert_cb_yield.patch b/images/nginx/rootfs/patches/openresty-ssl_cert_cb_yield.patch
new file mode 100644
index 0000000000..e3940391f8
--- /dev/null
+++ b/images/nginx/rootfs/patches/openresty-ssl_cert_cb_yield.patch
@@ -0,0 +1,42 @@
+# HG changeset patch
+# User Yichun Zhang
+# Date 1451762084 28800
+#      Sat Jan 02 11:14:44 2016 -0800
+# Node ID 449f0461859c16e95bdb18e8be6b94401545d3dd
+# Parent  78b4e10b4367b31367aad3c83c9c3acdd42397c4
+SSL: handled SSL_CTX_set_cert_cb() callback yielding.
+
+OpenSSL 1.0.2+ introduces SSL_CTX_set_cert_cb() to allow custom
+callbacks to serve the SSL certificates and private keys dynamically
+and lazily. The callbacks may yield for nonblocking I/O or sleeping.
+Here we added support for such usage in NGINX 3rd-party modules
+(like ngx_lua) in NGINX's event handlers for downstream SSL
+connections.
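Context for the hunk that follows: without this patch, nginx treats SSL_ERROR_WANT_X509_LOOKUP during the downstream handshake as fatal, so a certificate callback that suspends itself would abort the connection. The added branch re-arms the read/write event handlers and returns NGX_AGAIN, letting the handshake resume once the callback completes; this is what allows ngx_lua's ssl_certificate_by_lua* handlers to yield, which is presumably why this image carries the patch.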
+
+diff -r 78b4e10b4367 -r 449f0461859c src/event/ngx_event_openssl.c
+--- a/src/event/ngx_event_openssl.c	Thu Dec 17 16:39:15 2015 +0300
++++ b/src/event/ngx_event_openssl.c	Sat Jan 02 11:14:44 2016 -0800
+@@ -1210,6 +1210,23 @@
+         return NGX_AGAIN;
+     }
+
++#if OPENSSL_VERSION_NUMBER >= 0x10002000L
++    if (sslerr == SSL_ERROR_WANT_X509_LOOKUP) {
++        c->read->handler = ngx_ssl_handshake_handler;
++        c->write->handler = ngx_ssl_handshake_handler;
++
++        if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
++            return NGX_ERROR;
++        }
++
++        if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
++            return NGX_ERROR;
++        }
++
++        return NGX_AGAIN;
++    }
++#endif
++
+     err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0;
+
+     c->ssl->no_wait_shutdown = 1;
diff --git a/internal/admission/controller/main.go b/internal/admission/controller/main.go
new file mode 100644
index 0000000000..1d7b6db8fb
--- /dev/null
+++ b/internal/admission/controller/main.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "github.com/google/uuid"
+    "k8s.io/api/admission/v1beta1"
+    networking "k8s.io/api/networking/v1beta1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+    "k8s.io/klog"
+)
+
+// Checker must return an error if the ingress provided as argument
+// contains invalid instructions
+type Checker interface {
+    CheckIngress(ing *networking.Ingress) error
+}
+
+// IngressAdmission implements the AdmissionController interface
+// to handle Admission Reviews and deny requests that are not validated
+type IngressAdmission struct {
+    Checker Checker
+}
+
+// HandleAdmission populates the admission Response
+// with Allowed=false if the Object is an ingress that would prevent nginx from reloading the configuration,
+// and with Allowed=true otherwise
+func (ia *IngressAdmission) HandleAdmission(ar *v1beta1.AdmissionReview) error {
+    if ar.Request == nil {
+        klog.Infof("rejecting nil request")
+        ar.Response = &v1beta1.AdmissionResponse{
+            UID:     types.UID(uuid.New().String()),
+            Allowed: false,
+        }
+        return nil
+    }
+    klog.V(3).Infof("handling ingress admission webhook request for {%s} %s in namespace %s", ar.Request.Resource.String(), ar.Request.Name, ar.Request.Namespace)
+
+    ingressResource := v1.GroupVersionResource{Group: networking.SchemeGroupVersion.Group, Version: networking.SchemeGroupVersion.Version, Resource: "ingresses"}
+
+    if ar.Request.Resource == ingressResource {
+        ar.Response = &v1beta1.AdmissionResponse{
+            UID:     types.UID(uuid.New().String()),
+            Allowed: false,
+        }
+        ingress := networking.Ingress{}
+        deserializer := codecs.UniversalDeserializer()
+        if _, _, err := deserializer.Decode(ar.Request.Object.Raw, nil, &ingress); err != nil {
+            ar.Response.Result = &v1.Status{Message: err.Error()}
+            ar.Response.AuditAnnotations = map[string]string{
+                parser.GetAnnotationWithPrefix("error"): err.Error(),
+            }
+            klog.Errorf("failed to decode ingress %s in namespace
%s: %s, refusing it", ar.Request.Name, ar.Request.Namespace, err.Error())
+            return err
+        }
+
+        err := ia.Checker.CheckIngress(&ingress)
+        if err != nil {
+            ar.Response.Result = &v1.Status{Message: err.Error()}
+            ar.Response.AuditAnnotations = map[string]string{
+                parser.GetAnnotationWithPrefix("error"): err.Error(),
+            }
+            klog.Errorf("failed to generate configuration for ingress %s in namespace %s: %s, refusing it", ar.Request.Name, ar.Request.Namespace, err.Error())
+            return err
+        }
+        ar.Response.Allowed = true
+        klog.Infof("successfully validated configuration, accepting ingress %s in namespace %s", ar.Request.Name, ar.Request.Namespace)
+        return nil
+    }
+
+    klog.Infof("accepting non-ingress %s in namespace %s %s", ar.Request.Name, ar.Request.Namespace, ar.Request.Resource.String())
+    ar.Response = &v1beta1.AdmissionResponse{
+        UID:     types.UID(uuid.New().String()),
+        Allowed: true,
+    }
+    return nil
+}
diff --git a/internal/admission/controller/main_test.go b/internal/admission/controller/main_test.go
new file mode 100644
index 0000000000..435eb4334b
--- /dev/null
+++ b/internal/admission/controller/main_test.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "fmt"
+    "testing"
+
+    "k8s.io/api/admission/v1beta1"
+    networking "k8s.io/api/networking/v1beta1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/json"
+)
+
+const testIngressName = "testIngressName"
+
+type failTestChecker struct {
+    t *testing.T
+}
+
+func (ftc failTestChecker) CheckIngress(ing *networking.Ingress) error {
+    ftc.t.Error("checker should not be called")
+    return nil
+}
+
+type testChecker struct {
+    t   *testing.T
+    err error
+}
+
+func (tc testChecker) CheckIngress(ing *networking.Ingress) error {
+    if ing.ObjectMeta.Name != testIngressName {
+        tc.t.Errorf("CheckIngress should be called with %v ingress, but got %v", testIngressName, ing.ObjectMeta.Name)
+    }
+    return tc.err
+}
+
+func TestHandleAdmission(t *testing.T) {
+    adm := &IngressAdmission{
+        Checker: failTestChecker{t: t},
+    }
+    review := &v1beta1.AdmissionReview{
+        Request: &v1beta1.AdmissionRequest{
+            Resource: v1.GroupVersionResource{Group: "", Version: "v1", Resource: "pod"},
+        },
+    }
+    err := adm.HandleAdmission(review)
+    if !review.Response.Allowed {
+        t.Errorf("with a non-ingress resource, the check should pass")
+    }
+    if err != nil {
+        t.Errorf("with a non-ingress resource, no error should be returned")
+    }
+
+    review.Request.Resource = v1.GroupVersionResource{Group: networking.SchemeGroupVersion.Group, Version: networking.SchemeGroupVersion.Version, Resource: "ingresses"}
+    review.Request.Object.Raw = []byte{0xff}
+
+    err = adm.HandleAdmission(review)
+    if review.Response.Allowed {
+        t.Errorf("when the request object is not decodable, the request should not be allowed")
+    }
+    if err == nil {
+        t.Errorf("when the request object is not decodable, an error should be returned")
+    }
+
+    raw, err := json.Marshal(networking.Ingress{ObjectMeta:
v1.ObjectMeta{Name: testIngressName}})
+    if err != nil {
+        t.Errorf("failed to prepare test ingress data: %v", err.Error())
+    }
+    review.Request.Object.Raw = raw
+
+    adm.Checker = testChecker{
+        t:   t,
+        err: fmt.Errorf("this is a test error"),
+    }
+    err = adm.HandleAdmission(review)
+    if review.Response.Allowed {
+        t.Errorf("when the checker returns an error, the request should not be allowed")
+    }
+    if err == nil {
+        t.Errorf("when the checker returns an error, an error should be returned")
+    }
+
+    adm.Checker = testChecker{
+        t:   t,
+        err: nil,
+    }
+    err = adm.HandleAdmission(review)
+    if !review.Response.Allowed {
+        t.Errorf("when the checker returns no error, the request should be allowed")
+    }
+    if err != nil {
+        t.Errorf("when the checker returns no error, no error should be returned")
+    }
+}
diff --git a/internal/admission/controller/server.go b/internal/admission/controller/server.go
new file mode 100644
index 0000000000..5031dca58f
--- /dev/null
+++ b/internal/admission/controller/server.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "io"
+    "io/ioutil"
+    "net/http"
+
+    "k8s.io/api/admission/v1beta1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/serializer"
+    "k8s.io/apimachinery/pkg/util/json"
+    "k8s.io/klog"
+)
+
+var (
+    scheme = runtime.NewScheme()
+    codecs = serializer.NewCodecFactory(scheme)
+)
+
+// AdmissionController checks if an object
+// is allowed in the cluster
+type AdmissionController interface {
+    HandleAdmission(*v1beta1.AdmissionReview) error
+}
+
+// AdmissionControllerServer implements an HTTP server
+// for the Kubernetes validating webhook
+// https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook
+type AdmissionControllerServer struct {
+    AdmissionController AdmissionController
+    Decoder             runtime.Decoder
+}
+
+// NewAdmissionControllerServer instantiates an admission controller server with
+// a default codec
+func NewAdmissionControllerServer(ac AdmissionController) *AdmissionControllerServer {
+    return &AdmissionControllerServer{
+        AdmissionController: ac,
+        Decoder:             codecs.UniversalDeserializer(),
+    }
+}
+
+// ServeHTTP implements the http.Handler interface
+func (acs *AdmissionControllerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+    klog.Infof("handling admission controller request %s", r.URL.String())
+
+    review, err := parseAdmissionReview(acs.Decoder, r.Body)
+    if err != nil {
+        klog.Error("Can't decode request", err)
+        w.WriteHeader(http.StatusBadRequest)
+        return
+    }
+
+    acs.AdmissionController.HandleAdmission(review)
+    if err := writeAdmissionReview(w, review); err != nil {
+        klog.Error(err)
+    }
+}
+
+func parseAdmissionReview(decoder runtime.Decoder, r io.Reader) (*v1beta1.AdmissionReview, error) {
+    review := &v1beta1.AdmissionReview{}
+    data, err := ioutil.ReadAll(r)
+    if err != nil {
+        return nil, err
+    }
+    _, _, err = decoder.Decode(data, nil, review)
+    return review, err
+}
+
+func
writeAdmissionReview(w io.Writer, ar *v1beta1.AdmissionReview) error {
+    e := json.NewEncoder(w)
+    return e.Encode(ar)
+}
diff --git a/internal/admission/controller/server_test.go b/internal/admission/controller/server_test.go
new file mode 100644
index 0000000000..4ec2893d6d
--- /dev/null
+++ b/internal/admission/controller/server_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "bytes"
+    "fmt"
+    "net/http"
+    "net/http/httptest"
+    "strings"
+    "testing"
+
+    "github.com/google/uuid"
+    "k8s.io/api/admission/v1beta1"
+    "k8s.io/apimachinery/pkg/types"
+)
+
+type testAdmissionHandler struct{}
+
+func (testAdmissionHandler) HandleAdmission(ar *v1beta1.AdmissionReview) error {
+    ar.Response = &v1beta1.AdmissionResponse{
+        UID:     types.UID(uuid.New().String()),
+        Allowed: true,
+    }
+    return nil
+}
+
+type errorReader struct{}
+
+func (errorReader) Read(p []byte) (n int, err error) {
+    return 0, fmt.Errorf("this is a test error")
+}
+
+type errorWriter struct{}
+
+func (errorWriter) Write(p []byte) (n int, err error) {
+    return 0, fmt.Errorf("this is a test error")
+}
+
+func (errorWriter) Header() http.Header {
+    return nil
+}
+
+func (errorWriter) WriteHeader(statusCode int) {}
+
+func TestServer(t *testing.T) {
+    w := httptest.NewRecorder()
+    b := bytes.NewBuffer(nil)
+    writeAdmissionReview(b, &v1beta1.AdmissionReview{})
+
+    // Happy path
+    r := httptest.NewRequest("GET", "http://test.ns.svc", b)
+    NewAdmissionControllerServer(testAdmissionHandler{}).ServeHTTP(w, r)
+    ar, err := parseAdmissionReview(codecs.UniversalDeserializer(), w.Body)
+    if w.Code != http.StatusOK {
+        t.Errorf("when the admission review allows the request, the http status should be OK")
+    }
+    if err != nil {
+        t.Errorf("failed to parse admission response when the admission controller returns a value")
+    }
+    if !ar.Response.Allowed {
+        t.Errorf("when the admission review allows the request, the parsed body returns not allowed")
+    }
+
+    // Ensure the code does not panic when failing to handle the request
+    NewAdmissionControllerServer(testAdmissionHandler{}).ServeHTTP(errorWriter{}, r)
+
+    w = httptest.NewRecorder()
+    NewAdmissionControllerServer(testAdmissionHandler{}).ServeHTTP(w, httptest.NewRequest("GET", "http://test.ns.svc", strings.NewReader("invalid-json")))
+    if w.Code != http.StatusBadRequest {
+        t.Errorf("when the server fails to read the request, the replied status should be bad request")
+    }
+}
+
+func TestParseAdmissionReview(t *testing.T) {
+    ar, err := parseAdmissionReview(codecs.UniversalDeserializer(), errorReader{})
+    if ar != nil {
+        t.Errorf("when reading from request fails, no AdmissionReview should be returned")
+    }
+    if err == nil {
+        t.Errorf("when reading from request fails, an error should be returned")
+    }
+}
diff --git a/internal/file/bindata.go b/internal/file/bindata.go
deleted file mode 100644
index 6fd5810c9a..0000000000
--- a/internal/file/bindata.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Code generated by
go-bindata. -// sources: -// rootfs/etc/nginx/nginx.conf -// rootfs/etc/nginx/template/nginx.tmpl -// rootfs/ingress-controller/clean-nginx-conf.sh -// DO NOT EDIT! - -package file - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _etcNginxNginxConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xca\x41\x0a\x83\x40\x0c\x85\xe1\xfd\x9c\xe2\x41\xf7\x7a\x00\x57\x3d\xca\xa0\x89\x06\x34\x19\x32\x4f\x69\x29\xbd\x7b\x69\xe9\xea\x5f\xfc\xdf\x0d\x77\x5c\x92\x4f\x74\x3b\xda\x2e\xf0\xd5\xfc\x81\x39\x5c\x6d\x3d\xb3\xd2\xc2\xa1\xb6\x0b\xb8\x55\x42\x23\x67\xe9\x7f\xc4\x40\x67\x4d\x0e\xa5\xd9\x82\x31\x4f\x1f\x7f\x63\x68\xb6\x4c\xa5\xc8\x25\xce\x8e\xd7\xbb\x6c\x64\xfb\x76\xa9\x72\x84\x23\x54\xa7\x4f\x00\x00\x00\xff\xff\x75\xb5\xe6\xb8\x77\x00\x00\x00") - -func etcNginxNginxConfBytes() ([]byte, error) { - return bindataRead( - _etcNginxNginxConf, - "etc/nginx/nginx.conf", - ) -} - -func etcNginxNginxConf() (*asset, error) { - bytes, err := etcNginxNginxConfBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "etc/nginx/nginx.conf", size: 119, mode: os.FileMode(420), modTime: time.Unix(1508444716, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _etcNginxTemplateNginxTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6d\x73\xdb\x38\xd2\xe0\xf7\xfc\x0a\x94\xec\x2a\xdb\xa9\x48\x76\xb2\xd9\x79\xe6\xb1\x2b\x57\xe7\xd8\xc9\xda\x37\x4e\xa2\xb2\x9c\xcc\xd4\x5e\x5d\xa9\x20\xb2\x25\x62\x4d\x01\x5c\x00\xb4\xad\xe4\x74\xbf\xfd\x0a\x6f\x24\x08\x82\x94\x9c\xc9\x64\x66\xb6\x86\x1f\x5c\x16\xd9\x68\x34\x1a\x8d\x06\xfa\x05\xc0\x97\x2f\x68\x17\xe7\x39\x3a\x7e\x85\x46\x68\xbd\x7e\xa2\x7e\x0b\xe0\x77\xc0\x85\x7e\x37\xb1\xff\xdb\x4f\xc9\x7c\xa1\x5f\x9f\xcd\x17\xee\xd5\xa5\xb8\x1c\x7f\xfa\xe1\x0d\xc5\xb3\x1c\x52\xfd\xb1\xf9\xc6\x82\x65\x80\x73\x99\x7d\xfe\x78\x7d\xa9\x61\x2e\xea\x9f\x16\x60\x86\x93\x5b\xa0\xa9\xa9\xf6\xb5\xfb\x61\x3f\x16\x9c\x3d\xac\x2e\x00\xa7\x8e\xae\xb1\x7a\x31\x01\xe9\xde\x59\x38\x9c\xa6\x3e\xd4\x69\xfd\x73\xbd\x7e\xa2\x20\xc8\x5c\x37\x62\x64\xa8\x7b\xc7\x52\x01\x49\xc9\x89\x5c\x29\x80\x9c\xe1\x74\xba\x64\x69\x99\x03\x3a\x04\x99\x1c\xd2\x05\xa1\x0f\x87\xe6\x8d\x38\xa4\x8b\x87\x69\x26\x65\xa1\x40\x5c\x31\x0b\x3e\x12\xec\x44\xa1\x07\x9a\xc6\x6b\xfa\x50\x00\x95\x1c\x27\x84\x2e\x1e\x53\x13\xab\x8b\xf5\xd6\xb4\x8f\x69\xda\x55\xdd\x3e\x05\xf3\xe9\x9f\xa4\xb8\x25\xf4\x8c\xe5\x39\x24\x92\xf1\x0b\x26\x24\x1a\x0c\x0e\x0e\x1e\x43\xd0\x67\x8d\xa3\x83\x96\x14\xc3\x92\x51\xc4\xe6\xf3\x93\x27\x4f\xee\x19\xbf\x05\x3e\x2d\x38\x4b\x40\x08\x10\xc8\xca\xcf\xe8\x67\xfd\x61\x5c\xbd\x5f\xaf\x4f\x9e\x14\x24\x45\x87\xbc\xa4\xa6\xda\x51\x41\xd2\x13\xdb\x32\x0a\x68\xf4\x0e\x3f\xa8\x06\xbd\x25\x39\x08\x74\xa4\xaa\xb2\xc8\x79\x4e\x96\x44\x4e\x29\x9b\x93\x1c\x54\x05\x4d\x50\x85\xd9\x90\x67\x38\x75\xf8\x14\xa9\x46\x1c\x1f\xda\x6a\x18\x5f\x1c\x02\x3d\x4c\x59\x62\x5a\x98\x30\x0e\xae\x69\x99\x5c\xe6\x3b\xb6\x1a\x91\x95\x32\x65\xf7\x74\x2a\xc9\x12\x58\x29\xd1\xd3\x43\x2d\x6f\x87\x4f\x11\xbe\x63\x24\x45\xf7\x98\x48\xc5\x6a\xc9\x18\xca\x19\x5d\xa0\xb4\xe4\xea\x37\x46\x1c\x14\x67\x4d\x81\x2e\x6c\x4d\xc6\x4c\xec\xe7\x1b\xfb\x75\xbd\x46\x27\x4f\x9e\xc0\x1d\x50\x29\xd0\x97\x27\x08\x21\xb4\x2c\x73\x49\xa6\x38\x49\xa0\x90\xc8\x3e\x8c\x9e\xe8\x6f\xb6\x92\x84\x51\x0a\x89\x24\x8c\x0a\x54\x55\xf0\x0e\x3f\x98\x3a\xce\xbc\xaf\x8a\x49\xaa\x60\x29\x00\x85\x0f\x14\x2c\xcf\x4f\x9e\xac\x9f\x3c\x51\x7c\xb3\xb5\xeb\x76\xdf\x83\x2e\x20\x33\x40\x77\x38\x2f\x01\xb1\xb9\xfe\x91\xe9\xe1\x86\x7e\x19\xbe\x65\xfc\x1e\xf3\x14\x52\xf5\x1f\x92\x0c\xcd\x00\x29\xc9\x54\xff\xba\x92\x0b\x60\x53\x52\x20\x2b\x76\x9a\x47\xa6\x82\x6a\xf0\x7c\x14\xa0\x47\xfa\x98\x33\xc9\x12\x96\x23\x0b\xc2\x01\xe7\x53\x52\x4c\x6d\x75\xfa\xd1\x3a\x42\xc9\x9b\x86\x3c\x71\xa8\x20\x17\xd0\x57\xcc\x31\xa7\x22\xf8\x2d\xe3\x46\x69\x54\xac\xf1\x44\xdc\xc7\xc2\x95\x0e\x10\xe4\x0e\x3c\xf6\x7f\xf9\x82\x38\xa6\x0b\x40\xbb\x92\x97\x42\x42\xaa\x1a\x78\xfc\xca\x54\xa1\x9b\x72\x0d\x38\xbf\x1c\x9f\x5d\x9e\x5f\x3b\xaa\x04\xc8\xa9\xc3\x39\xe7\x6c\xe9\xa8\xf2\x30\x74\x50\xa2\xbb\x22\xc5\x12\xcf\xb0\x1a\x49\xa5\x80\x54\xf1\x37\x05\x09\x7c\x49\xa8\xe1\x72\xc2\x4a\x2a\xf9\x0a\xa5\x50\x00\x4d\x95\x58\x32\x6a\x3e\xe4\x04\xa8\x44\x97\x63\x84\xd3\x94\x83\x10\x7e\x17\xf4\x8d\x15\xf5\xa1\x56\x09\x0b\x60\xa4\xf0\x87\x4d\x80\x46\x66\x44\x20\x22\x10\x87\x7f\x97\x84\x6b\x01\x48\x70\x9e\x94\x39\x96\x80\x24\xc7\xf3\x39\x49\xd0\x9c\x71\x44\x68\x4a\xee\x48\x5a\xe2\xbc\xa2\xb9\x14\x8a\xde\x7f\x00\xbb\x1c\x23\x62\xa8\x16\x12\xcb\x52\xa0\x02\x2f\x3c\x91\x31\x34\xb8\x52\xe6\xf1\xd4\x98\x46\x30\x4a\xb1\x3c\xf1\xa1\x95\xd6\xaf\x9e\x26\xf4\x15\x91\x70\x46\xe4\x2a\x2c\x63\x64\xac\xee\x78\xd5\xed\xa1\xcc\x1a\x0d\xfc\x49\x8a\x89\xa1\xd4\x92\x78\x97\x31\x21\xa7\xb6\xbd\x53\xd3\x8a\xe9\x67\x46\x01\x89\x0c\x73\x48\x8f\x63\x00\xc7\x4e\x3a\x2b\x74\xff\x64\x14\x26\xe4\x33\x54\x22\x11\xc5\x3
b\x27\xb9\x04\x3e\x9d\xad\xa6\x4a\xb8\x6e\x61\x85\x3c\x44\xe7\x30\xc7\x65\x2e\xdf\x6a\x98\x9f\x60\xd5\x25\x5d\x02\x68\xaa\x15\xab\xf7\x54\x0d\xc6\x84\xb5\xd4\x85\xcc\x38\xe0\x54\x9c\x38\x80\xe9\x3d\x27\x12\x62\xa5\x65\x52\x4c\x29\x2b\x4a\x91\x35\x3f\xd6\xdf\x52\xc8\xf1\xaa\x5d\x30\x67\x8b\xa9\x28\x67\x4a\x96\x40\xc8\xe0\x23\x07\xd5\x5a\xa5\x56\x53\x56\x4a\x4f\x07\xd6\x20\xb7\x00\x05\xce\xc9\x1d\x54\xda\xb7\xe2\xcc\x4f\x00\xc5\xa9\xfa\x84\xd6\x6b\xdb\x86\x1a\xda\x56\x28\xda\xd0\xd7\xee\x8b\xe2\xa2\x2e\x65\xc6\x95\x55\x32\xd3\x59\x39\x9f\x2b\xad\xaf\xfa\xac\xa9\x71\xce\x34\x9c\xd1\x35\xaf\x35\x54\xa3\x63\x9b\x68\x2a\x72\xab\x27\x86\xa6\x9e\x34\x6c\x0b\x72\xcc\x17\x30\x8d\x51\x24\x1a\x48\xae\x14\x5c\x9b\x20\x11\x52\x33\x63\xe9\x2a\xd2\xa4\x90\x9a\xd7\x2c\x5d\x75\x37\x49\x23\x69\x37\x28\x86\xa4\xd1\x20\x8d\x43\xe9\x9c\x17\xd3\x25\x7e\x98\xce\x09\xe4\x69\x93\x0a\x0f\xc7\xc5\xcd\xcd\xf8\xc5\x3b\xfc\xf0\x56\x41\x35\xa8\xa8\x31\x58\x86\x04\x28\x42\x0c\x86\x21\x15\x0a\x23\xa4\xab\x02\xc4\x34\xc3\x22\xd3\x88\x42\x22\xd0\x8b\xa3\x97\x3f\x9e\xd8\x51\xa4\x56\xd0\x53\x8a\x97\xf1\x02\xae\x36\xb3\xd2\x7e\x8f\x97\x70\x81\x45\xf6\x0e\x3f\x34\x68\x6e\x63\x99\x95\xc9\x2d\x48\x87\x28\x8e\xe5\xb5\x86\x69\x20\x5a\xe2\x22\x56\xbe\xd5\xf8\x77\xb8\x88\x60\xd0\x28\x8c\x12\x34\xbc\x8b\x35\xc9\xa1\x18\x7b\x8b\xf6\x58\x9b\x22\x78\x1a\x44\x75\xe1\x89\xd1\x74\x87\x39\x51\x6a\x37\xca\x1c\x8f\xa6\x4f\x0e\xae\x83\x3d\x01\x9e\x56\xdf\x46\xf1\xf8\x0d\x33\x6b\x29\xaa\x68\x55\xcb\x49\x31\x25\xd4\x35\xb1\xc1\xe4\xe6\x6c\xf1\xb1\x2e\x70\x49\x6b\x93\x85\xd1\x7a\x01\xc3\xe6\xf3\x4a\x3f\x1b\x5a\xc9\x82\xaa\x05\x2b\xa1\x77\x38\x27\x69\x6f\x2d\x97\x1a\xf4\xd2\x40\x6e\x83\x3f\x3e\xa9\x05\x56\x8c\x56\xbe\xde\x2b\x6f\x11\xd4\x5c\xa5\x7c\x13\x13\x45\xa1\xb2\xe6\x47\xe2\x00\xa6\x6a\xf2\x8b\x48\x6f\x0c\x8f\x63\x5b\x0b\x47\xc1\xf8\x16\x38\xc6\x0a\x2a\xc0\xa1\x46\x25\x49\x40\x0f\xcb\xa8\x22\x33\x38\x26\x06\x4c\x8d\xca\xae\xb9\x96\xd0\x24\x2f\xd3\xa6\xed\x45\x96\x30\xd2\x7a\xc6\x94\x48\xcd\xb4\x3d\x55\xaf\x90\x84\x07\x79\xa8\x16\x5b\xe6\x5b\xbc\xbb\x5e\x73\x26\x73\xe2\x78\x37\x33\xbf\x5c\x27\x99\x9f\xd3\x84\x2d\x8b\x69\x0e\x77\x90\x57\x44\x9b\x62\x57\xfa\x9d\x23\xd7\x42\x6b\x72\x02\xc0\x1b\xfd\xae\x73\x85\xea\xaf\xe5\xff\xf1\x59\xaf\x65\xcd\x82\x4a\xfd\xef\x88\x51\x3f\x7c\x52\xfe\xee\xbd\xd6\x8b\xcc\x3b\xe0\x42\x4d\xe4\xcf\x47\xcf\xbd\x4f\x4b\x42\xa7\x39\xd0\x85\xcc\xd0\x8b\xbf\xff\xe0\x7d\x68\xd2\xa9\xaa\x6d\x52\xa9\x81\x94\xfe\x21\x90\x22\x4c\x57\xde\xdb\x3b\xcc\x57\x5d\x92\xbc\x83\xce\x4a\x21\xd9\x12\xb9\xb1\xa6\x96\xad\x1c\x44\xc1\xa8\x80\x60\xfd\x7f\xfb\x0c\xed\xde\xe9\xa5\x3f\x6e\x78\x21\x14\x14\x4e\xdd\x70\xd5\x34\xde\x2a\x23\xcf\x7b\x06\xea\xe5\x1d\x5a\xaf\x07\x1d\xeb\x32\x3d\x17\x48\x76\x0b\x54\xf8\x1c\x9e\x64\xec\xde\x4c\x00\x37\xe6\xdb\xc6\x11\xbe\x83\x52\x22\xb4\x59\x76\x8f\x39\x25\x74\x21\xac\xfe\x22\x94\x48\x82\x73\xf2\x19\xd2\xa9\xd3\x89\x53\x05\x63\xad\x7b\x53\xf8\x34\x4d\x89\x5a\x5f\xe1\x1c\xe1\x3b\x4c\x72\x8d\xa9\x52\xa1\xc7\x16\x6a\x57\xcf\x5a\x05\x4e\xc0\xbd\x20\x74\xa1\xec\x0d\x3d\x6e\xdc\x3b\x7f\x2c\x55\x4b\xbd\x39\xe3\x4b\x2c\x51\x59\x08\xc9\x01\x2f\x09\x9d\x33\xbf\xc1\x57\x6c\xf1\x56\x43\xbc\x11\x09\x2e\xe0\x7f\x4d\x3e\xbc\x47\xeb\x35\xe8\x1f\xaf\xfe\x25\x18\xad\x59\xb7\xf7\xe5\x0b\x9a\x95\x24\x4f\xab\x32\x1f\x2d\x52\xe3\xd5\x5a\xaf\xf7\x4e\x3c\xa3\x6a\x89\x0b\x54\xf2\x5c\x20\x99\x61\x89\x44\xc6\xca\x3c\x45\x94\x49\x84\x8b\x02\xb0\xb2\x55\x90\xb2\xbf\x85\x18\xe5\x6c\xf1\x75\x76\x93\x6a\x9f\xef\x6c\x30\xf8\xa6\x0d\x7c\x8a\x8c\x5d\xbb\xf6\x9c\x96\x9c\xa0\xdd\x9c\x2d\x16\x9a\xcd\xc6\x18\x6f\xca\x1c\x87\x7f\x7f\xe4\xa4\xb2\x
37\x27\xb7\xa4\x38\xd5\x58\xaf\xd8\xe2\xe3\xf5\x55\x25\x7f\x4e\x45\x59\xf8\xf5\x1a\x1d\x9d\xd4\x42\xe6\x20\xac\xc2\x41\x76\xc4\xb5\x47\xf4\xb9\x91\x9d\xaa\x8a\x4a\xbc\xeb\x96\x68\x69\x41\x6d\x4b\xdc\x03\x71\xa3\xb4\x42\x33\xc6\x32\x53\x34\x35\x7a\x9d\xcc\x5f\x55\x6d\x0f\x47\x85\xfa\x05\x9c\x33\xae\xf1\x55\x08\xdf\xa8\x57\x1e\xbe\xf0\x7d\xad\xe1\x1c\x3e\x2d\x20\xd7\x20\x58\xae\x9d\x9e\x1a\xda\xfd\x6c\x1a\xdd\x3f\x67\x40\x41\xbd\xd5\xbd\x8c\x8c\x2a\x11\xda\xe1\x63\x2c\x93\x7b\x22\x33\xb5\x70\xc5\x68\x50\x7b\x5b\x06\x56\x71\x3c\xd3\x96\x6c\xe4\x83\xb2\x93\x05\x48\x65\x23\x0f\x92\x9c\x09\x18\x04\xb2\x75\x9f\x01\x45\x4b\x7c\xab\xbd\x4d\x19\x20\xa9\xd6\xed\xd2\xd5\x3a\x42\xe8\x46\x59\xdb\x4b\xc0\xd4\x8a\xee\x8a\x95\x28\xc1\x54\x89\xae\x20\xcb\x22\x5f\x69\xef\x4b\x13\xe9\xc0\xac\xc4\x94\xf1\x64\xc9\xa8\x29\x43\xbb\x5a\x58\x13\x8f\x54\xa5\xf4\x7e\x86\xd9\x84\xa9\xd5\x13\x12\x65\xa1\x27\xd0\x19\x24\x58\xa1\xd6\x66\x3a\x11\x28\xc1\x02\x4c\x3b\x83\xca\x22\xad\xbe\xd7\xc3\x6b\x06\x55\xdb\x07\xba\xa1\x9a\xc1\x0a\x03\xe3\x64\x41\x94\x96\x71\xdc\x4d\x89\x19\x8d\x19\xbe\x83\x2e\x16\x87\x8c\x23\x49\x66\x2b\x52\xec\x41\x94\xf5\x93\x62\xdd\x0d\x2d\x06\x4f\x08\x4d\xc0\x34\xd1\xc0\xa6\x70\x47\xb0\x04\xa4\x5d\x37\xcd\x4a\xa9\x52\x34\xb9\x95\x91\x19\x64\xf8\x8e\x30\x8e\xee\xc1\xd0\x5d\x79\xc2\x88\x50\x6a\x85\x33\x9c\x64\xa3\x1a\xc3\x0e\xba\x06\x89\x2d\x19\x6e\x2c\x1a\x54\x19\xa6\x69\xae\x57\x5c\x73\x54\x59\xa6\xbd\x12\x57\x6b\x13\xdd\x9d\x65\xb1\xe0\x38\x55\x8b\xae\x0a\xb2\x7a\xf7\xa5\xa5\x00\xaa\xc7\x82\x9c\x54\x10\x7b\x7b\xa8\xf9\x68\xa1\x6d\xe8\x0b\x55\xa9\x1b\x5a\xbe\x8f\xad\xc7\xeb\x86\x76\x65\x06\xce\x25\x56\x79\x1d\x37\x3a\x05\x0d\xd3\xfe\x01\x0d\x97\x96\xee\x15\x0d\x8e\x1c\x7c\x77\xfb\x76\x9b\x3e\xc4\xa9\x42\x12\x55\x5f\xf1\xd2\x1c\x96\x4c\x42\x50\xaa\x56\x51\xd5\x22\x42\x3b\xf6\xf4\x34\x31\x7d\x50\x53\x9c\xe1\x80\xa9\xb6\x5a\x5a\x24\x8c\x73\x48\x64\xbe\xd2\x7e\xb1\x44\x49\x98\x10\xb9\x52\xa8\x39\xc3\x29\xa1\x8b\xa0\x4b\xdb\x98\x76\x0b\x2c\xc4\xd4\x6a\x5a\x91\x64\xb0\xec\xed\xdc\x0e\x34\x3d\x9d\xbd\x6b\x90\xb6\xba\x3b\x82\x49\x69\x08\x43\x8f\x5d\xbd\xe8\x37\x3d\xd4\x74\x20\xe9\xa1\xc6\x05\xae\x3c\xb8\x5e\x9a\xb4\xe9\xb0\x3b\x53\x33\xab\xfe\xa6\x7f\x3f\x8a\x3f\xaa\x44\x1f\x7b\xd4\xc8\xf6\x80\x9a\x13\x28\xce\xf3\xd1\xa5\x98\x4c\xae\xc6\x58\x08\x99\x71\x56\x2e\x32\x2f\x66\x66\x24\x45\x51\x6e\x38\x65\xe2\x74\xa3\x2b\x22\x24\x50\x65\x8b\x88\x91\x2a\xab\x05\x7b\xbd\x56\xba\xe4\xe5\xcb\xbf\x69\xd5\x1c\x71\xc1\x2b\xf8\x9a\x13\xad\x6e\x30\x6f\x82\x1e\xd9\x54\x63\xe3\x79\xf9\xf2\x6f\x27\x7d\xc3\x2a\xa8\xd1\xf1\x03\xb5\x47\xd6\x23\x28\x54\x0d\xfe\x96\x54\x34\x16\xfa\x1f\x66\x5a\xf7\x2a\xf1\xd0\x23\x15\xa9\x7e\x0c\xa4\xc9\x48\x50\xd5\xcb\x9b\x85\x67\x93\xc0\x74\xc9\x8a\xf6\x89\xb1\x65\x51\x4a\x78\x5b\xe6\x79\x43\x8d\x56\xb2\xf2\x33\xa8\x89\x7e\x4f\xea\x49\xc5\x6a\x32\x65\x68\xf8\x12\xab\xe4\xc3\x4d\xd4\x6a\x5a\x51\x3a\xb6\x8a\xc0\x58\x3c\x1c\x8a\x1c\x27\x20\x2c\x40\xa5\xd2\x74\x74\x4b\x30\x46\x7b\xc6\x94\xc2\xbf\x3b\x2f\xf3\xbc\xf5\xb6\x21\x58\x4e\xfe\xcf\x36\x68\xf3\x16\x1b\x07\xd1\x2a\x9f\x45\x15\xf7\xa0\x87\xd3\x83\x0d\x05\xb6\x51\xf7\x9d\xb4\x18\xa6\x4e\x3d\xd6\xf5\x93\xd2\x0b\x5f\xc5\x2f\x3b\x65\xd5\x73\x0d\x4e\x09\x9d\x72\x48\x89\x9a\x38\xea\xa5\xb7\x92\xf5\xc6\x17\xfb\xd4\x96\x9c\x10\x79\xc5\x8a\xda\x6c\x36\xa3\xdd\xbe\xf4\x4c\x46\x59\x2a\x33\x90\x22\x01\x42\xdb\xe4\x09\x4e\x32\x13\xfd\x44\x29\xc7\x42\x92\x04\xe7\x6a\xda\x5a\x16\x9c\xdd\x01\x2a\x80\x6b\x33\x8e\x26\x10\xca\xf4\x64\x72\x35\x31\x48\xce\x70\x92\x55\xec\x56\xe4\x58\xe4\xd3\x44\x7f\x50\xab\x07\x49\xe8\xf1\xf3\xa3\xa3\x23\x17\x2f\x99\x4c\xae\x8e\x3d\
x52\x7d\x44\x4d\xd7\xa9\x87\x2d\x0c\xb9\xd6\xe5\x6a\x47\x73\x87\xe5\x8f\xf3\x9c\xdd\xa3\x84\xd1\x39\x59\x98\xe8\xae\x9a\x8e\x1d\x0f\x24\x51\x8b\x61\x11\xa9\x50\xbf\x8f\x37\xfa\xc6\x7e\xdc\xd2\x11\xa7\x16\xbc\xfb\xb0\x2c\xe4\x2a\x8e\xe8\x27\x58\xa1\x83\x18\x0f\x0d\x11\x3a\x0c\xe4\xf9\x97\x2c\x69\xa3\x5b\x58\x75\x34\x59\xe4\x64\x91\xa9\x15\x08\x87\xb4\x4c\x8c\xc6\x50\x1c\x1c\x4a\x36\x9c\x13\x2e\xe4\x70\xb6\x92\x50\x55\xe7\x87\x05\x3c\x06\x07\x71\x80\xfe\x06\x9d\x91\x22\x03\x2e\x0e\x6a\xb5\xd6\xe6\x7b\x62\x7c\x30\x8a\xfd\x89\x01\xaf\x48\xb0\xbf\xd1\x9e\x57\xbf\x45\x69\x2c\xfc\x5a\xda\x41\xd3\x6a\xc6\x8e\x2b\xd6\xeb\xc0\x8c\x50\x7b\x7e\x31\xc6\x1c\x2f\xdb\xd4\x1a\x0a\xcf\x2f\x90\x8e\x9f\x6d\xe9\x10\x50\x74\xf9\x0e\x01\xf5\x3b\xcd\x0a\x55\x43\x45\xb7\xfd\xed\xf3\xd7\x12\xd1\xef\x81\x53\xc4\x7b\x9e\xc1\xf3\x15\xc5\x4b\x92\xdc\x5c\x4d\xae\x21\x61\x3c\x15\xbe\xd8\xa4\x2b\xa5\x2b\x12\xdd\x91\xd3\x9c\xa1\xa3\xb8\x17\x4a\xe4\x53\x48\xd2\x6c\x9a\x94\xfc\xae\xd1\xe1\x6f\xce\xce\x2f\xce\xf4\xcb\xa0\xbf\x47\xc6\x7b\xa6\xed\x6f\x51\xf3\xcc\xfa\xd4\xb4\x05\xaf\xe3\xba\xa6\x3f\x8d\x86\x26\x54\x02\x4f\xa0\x90\x53\x30\xc5\xba\xfb\xc8\x3a\x41\x80\xf3\x33\x96\x42\xe5\x05\x31\xe8\x2f\x6e\x6e\xc6\xcd\x8a\x8d\xc7\x40\xc7\x91\x15\xf1\xae\xd8\x7a\x8d\x5e\xa1\xff\x69\x7a\x70\xda\xfc\x70\x12\xd4\x69\xcd\x66\x6f\xa4\x71\x50\x33\x6b\x2c\x2a\x7c\xaa\x04\xc3\xa6\x35\x19\x1f\x5d\x65\xee\x78\xb8\xf4\x12\xc5\x2e\xe0\x0c\xd4\x23\x04\x52\x35\x71\x42\x49\x51\x80\x3c\x68\x71\x37\x51\x4d\x10\xe6\x6b\x35\x9a\x20\x75\x66\xae\x7b\x83\xb5\xcd\x6f\x7e\x2d\x71\xe1\x6a\x0b\xf1\xa3\x75\x74\x2a\xaa\x3b\x41\xcd\x46\xcf\xd0\xae\x73\xe1\xe8\xce\x98\x79\x49\x5d\x75\x2b\xe0\xdf\x35\xd8\xc8\xea\xb3\xd3\xf9\x9c\x50\x22\x57\x23\xf7\xcf\xcd\xaa\x00\x34\x48\x18\xbb\x25\x30\x70\xe5\x2b\xe4\x6a\xca\xb9\x5d\x0d\x15\x9d\x15\x26\xeb\x6c\xf7\xd6\x1d\x06\x0a\x65\x58\x64\xaf\x1a\xa0\x61\xa5\x67\xba\x9a\xf0\xed\x05\x16\xda\x8b\xa4\x5a\xf6\x35\xe5\x1d\x41\x5a\x1b\x30\x9a\xaf\xac\x8c\xd4\x9c\xd8\x5f\xd8\x31\xea\x3c\x93\x3f\xb9\xf0\xb3\x9f\xaf\x73\x74\xe0\xaf\x4a\xaa\x08\x75\xd5\x4b\xbd\x85\x9d\x8e\x88\xf4\x5d\xb3\xff\x8c\x62\xd4\xdd\x56\xb5\xf3\x0d\x4d\x0b\x46\xa8\x9e\xb0\xec\xf7\x3a\x5d\x70\x74\x6a\x0d\xee\xff\x8b\x8c\xd3\xf6\x72\x8c\xd6\xeb\x63\x0f\xc0\x06\x50\x90\x0e\xdd\x62\x92\x8b\x57\xde\xc7\x77\xf8\xe1\xad\x7a\xa7\x00\xd4\x47\x37\x53\xfb\x30\x0a\x20\x9c\xa6\x23\x0d\xa9\x65\xd1\xbd\x6d\x4a\xcb\x06\x31\xd9\x41\x57\x0c\xa7\x68\x86\x73\xb5\x62\x41\x38\x5f\x30\x4e\x64\xb6\x3c\x41\x66\xa8\x69\x97\x3f\x2b\x69\x8a\x38\x9b\x11\xfa\xcc\x7a\x96\x88\xf0\xfd\x34\x41\xcf\xba\xf0\x96\xc2\xfc\xda\x20\x3e\x75\x78\xd1\x40\x63\x9b\x6a\x6c\x83\xd0\x43\xdb\x59\x6a\x73\x4f\x2a\xcd\x53\xb5\xd3\x49\x85\x0e\x7b\xae\xfc\x5a\xd4\x78\x68\xf2\xa4\x05\xab\xf4\x81\xd0\x66\xa0\xdc\xa2\xd2\xbf\xc4\xf8\xd7\x8a\x71\x5c\x8a\x91\xf3\x2a\x6a\x57\x9a\x96\xb6\x25\x2e\xac\x9b\xf7\x9e\xe4\x39\x9a\xd9\xa4\x3c\x86\x74\x9c\x57\xe7\x57\x65\x80\x7e\xce\x88\x84\x9c\x08\xd9\x48\xb1\xb3\x1c\x22\x34\x85\x87\x67\x0d\x4e\x89\x3a\xe7\xb7\x09\x9b\xb3\xc4\xcc\x0f\x35\xd4\xe8\xca\xbe\xf3\xc1\x77\x0b\x2c\x33\x05\x64\x03\x2e\xb6\x54\x5d\xbe\x39\x8f\x11\xe1\x40\xf4\x24\x09\x69\x13\xb2\x06\x5c\x48\xb4\x9f\x83\x87\x68\x54\x35\x6d\x74\x76\x79\x7e\x7d\x60\x92\x43\xed\xc4\x77\x0e\xd4\x8c\xd6\x2f\x5f\x50\xc1\x09\x95\x15\xcd\x17\x4c\x48\x13\xb2\x35\x94\xd6\x49\x64\x81\xe7\xd1\xba\x2d\x15\x26\x17\xf0\x47\xfb\x1d\xb8\x06\xd3\x81\xc5\x77\xd0\x54\x28\x5e\x18\x25\x22\xa1\x36\x3d\xb0\xa3\x45\xa1\x3a\xd0\xe1\xd3\x56\xb0\x26\x9c\x8b\xb7\xff\x15\x74\x30\xd7\x89\xe0\xfb\x26\x7b\xec\x1a\x4b\xb8\x22\x4b\x22\x45\x2d\x12\xde\xba\x42
\x7d\xd6\xe9\xb7\x26\x92\x94\x3b\x55\x1a\x67\xe5\xee\xbd\x6b\xd7\xd4\x82\x5f\x9e\xc7\xd9\x74\x74\xd2\xcd\x25\x9e\xd7\xfc\x89\xb3\xe6\x79\x8b\x35\xdb\x90\xab\x9d\x19\x1d\x14\xee\x9a\x1c\xe3\x0e\xaa\x8f\x6a\x2d\xad\xc0\x94\x92\xfa\x27\xa3\x50\x49\x8b\x3f\xc0\x9f\xa3\xc1\xa0\xc7\xe7\xe4\x8d\x6d\x9c\xe7\xd6\xfb\xa2\x53\x26\x53\xc4\xd5\x58\x36\xe4\x7f\x66\x14\xc4\x08\xbd\xc1\x49\x86\x74\x5c\xc7\x0c\x14\x0b\x2a\x10\x46\x29\x18\x7f\x71\xaa\x61\x83\x90\xc4\xf3\x77\xaf\xd1\xf0\x7f\xa0\xe7\x3f\x20\x99\xb1\x52\x60\x9a\xa2\x1f\x5e\x6a\x3b\x4e\x67\x56\x82\x40\x8c\x23\x3c\x53\xca\xe9\xc7\x1a\xe4\xf9\x8b\x1f\x1b\x30\x11\x45\xa2\xeb\x52\xd2\x63\x82\x68\x4e\x78\x14\x37\x6a\x01\x3a\xf0\xb5\x84\x2e\xd1\x25\x92\x87\x4f\xd1\x6b\xcd\x0b\xab\x98\x9c\xf3\x42\xa0\xfd\x39\x67\xcb\x43\xc9\xd0\xfd\xfd\xfd\x41\x8c\x92\xcc\x0e\xc9\x67\x68\x57\x32\xbd\x0f\xe0\xda\x16\x9e\x34\x35\x9b\xd3\xfe\x11\x81\x73\xe1\x03\x1d\xbf\xb7\x9e\xaa\xd7\x84\xa6\x76\x7a\xb8\x2c\xee\x5e\xfa\x02\x98\xeb\xd9\x11\xd9\x2d\x08\xba\xa4\x9d\x35\x42\x17\xaa\x5a\x44\xa3\xf5\x7a\xb3\x13\x2c\xc8\x67\x0e\x72\x7f\x7a\x2b\xdd\xec\x5e\xde\xe4\xdc\x6d\xd7\x6d\x7c\x13\x5d\x2d\x9a\xfc\xaa\x26\x55\xff\x28\xa3\xb2\xd7\x09\xe7\x37\xf9\x7b\x30\xf6\x3f\x88\x93\xcd\xd8\xbe\xad\xa2\xb5\x55\xe7\x6b\x46\xc2\x0f\x7f\x8d\x84\x6f\xde\x7f\xdb\x8c\x82\xff\x7d\x7c\xfc\x7f\xbe\x13\x67\x5d\x55\xff\x21\xfc\x6c\x8d\x85\xe0\x8d\xe7\x52\xd7\x42\xe5\x66\x94\x06\x22\x0e\xda\x11\xfe\xb7\xa3\xe7\x2e\x10\x7a\x7c\x78\xa8\xf7\x6d\x30\xb4\x5e\xfb\x59\x3b\xbd\x93\xfe\x76\x0b\x71\xb3\x8a\xd9\x51\xf3\x2f\x97\xa8\x6d\xb6\x5c\xd4\x14\xc6\xa7\xb6\xb0\x49\xed\x72\x0d\x2b\x28\x27\xb8\x65\x66\x49\x58\x16\x7a\xd3\xc6\x60\xf2\xe6\xfa\xd3\x9b\xeb\x81\x45\x7a\xa6\xbd\x45\x66\x1f\xa1\x23\xbf\x6d\x16\xb6\xdc\xa7\x1a\x30\xf4\x57\xa1\x6f\xe4\xb3\x42\xad\x9c\xe8\xc0\x73\x15\xe9\x8b\x56\x33\xcf\x3e\x4e\x6e\x3e\xbc\x9b\xbe\xb9\xbe\xfe\x70\x3d\x19\x98\x06\x36\x96\xdc\x3b\x3b\x1a\x41\x7f\x77\xc4\x7d\xeb\x6e\xb9\x6b\x0a\x3c\x33\x7b\x77\x94\xb5\xf2\xfe\x1f\x97\xef\x7f\x41\x66\xbb\x64\x92\x41\x72\x8b\xd4\x02\xcc\x44\xf1\x95\x69\x67\x32\x40\xd4\x3a\x4c\xc4\x7b\x7a\x07\x7d\xb4\xd1\xbd\xee\xc8\xb1\xdb\x99\x82\xf6\x39\xa6\x29\x5b\xda\x4d\x5c\xff\x2a\x85\xce\xbf\x31\xbb\xda\x6e\x29\xbb\xa7\x1a\x89\x38\x40\x58\x54\x24\x6b\xb4\x8a\x54\xe3\xd0\xf6\xfb\x2d\xc3\xea\xdd\xc2\xe4\xb4\x18\x9c\xde\xc2\x34\xc9\xb4\xa8\x13\x7a\xec\x95\xc9\xa4\x2c\xc4\xf1\xe1\xe1\x82\xc8\xac\x9c\x8d\x12\xb6\x3c\xbc\x2d\x67\xc0\x29\x48\x10\x87\x36\x53\x70\x68\x42\x16\xb3\x9c\xcd\x0e\x97\x58\x48\xe0\x87\x09\xa3\x92\xb3\x3c\x07\x2e\x6c\x40\xa3\xb8\x5d\x1c\x26\xcb\xd4\xfb\x62\x5d\xee\x0b\xb6\xcd\x02\xa2\x66\x8a\xcb\xb4\xad\x96\x9e\xa5\x00\x93\xe3\x84\x93\xdb\x9c\x2d\x5e\xb9\xe2\xaf\xcd\xef\x46\xb8\xa9\x67\x76\xdd\xa4\xb7\x7f\x3d\x05\x31\x55\x26\x5d\x74\xd4\x79\x40\x8c\x12\x18\x0c\x07\x9e\x41\x5a\x19\xdc\xc1\x5e\xdd\x86\xb1\x83\x3a\x72\xfb\x02\x8d\xf8\xe2\xc8\xb3\xe1\xd6\x91\x3a\x4c\xb7\xd8\xbd\x4a\x01\xfe\x6e\x7a\x75\x24\x80\xe2\xdc\x27\x1b\xc5\xd2\x9e\x5b\x5b\xaf\xdc\x13\xdd\x2a\x95\x12\x51\xe4\x78\x75\xb2\x35\xa4\x4b\x4a\xad\xb3\xaf\x7d\x7d\x12\x4c\xd5\x9b\x58\x26\x64\x39\x73\x8c\x70\x91\x0d\x1f\x5d\xb3\x33\xa3\xcc\x0c\x18\xd8\x13\x6a\x71\x4f\x2b\xdd\x0f\x21\xf4\xcb\x50\x07\x39\x5e\x1e\xbd\xdc\x44\x44\x7f\x2f\xb9\x9f\x43\x2b\xc2\x43\xeb\xf5\x1f\x9c\x44\x48\x28\xb0\xf0\xf6\x2b\xd8\x30\x59\x17\x86\xa8\x4c\x6d\xaf\xaf\xd7\x4f\x9e\x38\x07\xf0\x13\xd4\x4c\x2f\xd6\x9b\xca\x6a\xe7\x70\x23\xb3\x78\x62\xde\xc7\xf6\x44\xfc\xc6\xe9\xaf\x35\x55\xd1\x18\xd0\xe6\x84\xd7\x2a\x60\x7f\x73\x36\x46\x36\xbd\x5a\x84\xab\x0e\x65\x24\x27\xc5\xa4\x5a\x74\x8c\x6e\xce\xc6\xa
f\x83\x40\x4d\xe5\x3a\x97\x49\xa1\xa3\x2c\x55\x09\xe7\x0d\x0d\xde\x5a\x04\xda\xc7\xa2\x13\xbf\xfb\x41\xba\xbf\x3a\x67\xeb\x97\x80\xec\x7f\x3d\x43\xbb\x60\xfd\xb8\x7a\xa9\x54\x17\xf5\xdd\xbb\xc1\xea\x07\xb5\x1e\x1d\xdf\xb3\x05\x2a\xd7\xaf\xb5\x57\xaa\xf7\x8d\x9d\x1f\x31\xcf\xdb\x37\xf7\x23\x44\xe9\x0c\xcc\xa9\x56\x1f\x58\xc9\x8c\x30\xd1\x5f\x2c\x8f\xce\x21\xb1\x61\xce\x8d\x8b\xff\x6e\xd3\x23\x4a\xe0\x77\xa2\xe8\xfb\x1a\xb2\x7f\xbe\xae\xa8\x96\x17\xbf\x57\x7f\x04\x6f\x0c\x92\xee\xfd\xab\xe6\x28\x0f\xad\x60\x62\x61\x91\xd8\x4c\x81\xcc\x36\xe1\xdf\x45\x17\xb5\x56\x79\x9b\xd8\xf9\x86\x5a\x76\x86\x4d\x72\xe6\x6b\xf5\xf8\x0b\x80\x6d\xa2\x41\x3b\xe8\xe3\x79\xaf\x66\x2f\x53\x5f\xb3\x7f\x3c\xef\xd6\xec\x65\x6a\xb8\x59\x95\x68\x70\xb3\x7e\xdb\xc9\xcd\x38\x48\xf7\xd7\xad\x35\x7b\x5d\xf4\x3b\x6b\xf6\xdf\x4f\xb5\xb7\x3a\x41\x75\xcf\x57\xab\xe5\x2d\xb0\xfd\xf1\x54\xea\xb7\x63\x41\xa5\x0e\xbf\x86\x0f\x51\x55\xe6\x36\xfc\xd5\x0a\xe9\x79\xa8\xae\xbe\xb9\xb6\xfb\xbd\xc6\x67\x98\x8d\x6c\x19\xe2\x8e\xd3\x49\x41\x67\xb7\xe8\x23\x16\xe6\xd5\x4a\x5c\xd4\x3e\x04\x0e\x05\x48\x62\x02\xc4\xf6\xd4\x1c\x53\xa8\xbd\x5c\x0f\xe2\x79\x45\x70\xc2\x53\xc7\xa9\x4f\x6d\xa1\xf4\xf2\xbf\xb4\x40\x76\xe5\x7f\x21\xdf\x8a\x8a\x27\x7c\x05\xa6\x95\xb3\x40\x03\x03\xb4\x2b\x51\xad\x4a\xaf\x6d\x02\x36\xed\xae\xda\xf4\x42\x81\xb4\x78\x79\x67\xdb\x61\x31\x06\x8b\x07\x69\x12\x94\xcd\x39\x41\x5b\xe2\xf8\x60\x77\x5a\x0d\x95\xf1\x8f\xcc\xa6\x96\xc0\x8f\xb9\x11\x47\x2d\x7b\x8e\x8e\x6a\x03\xe8\x96\x18\x2e\xad\xd7\xe7\xbd\xdb\x57\xdd\xd8\x31\xba\x25\x12\xbb\xeb\xda\x43\xe2\x6f\x31\x0d\xba\x86\x43\x78\x56\x89\x7e\xf6\x47\x4f\x0f\xd0\x21\x9a\x71\xc0\xb7\x9b\x0d\xd8\xc7\xd9\xb0\xee\xbf\x7a\x54\x35\x8e\xf5\x3a\x7c\x8a\xce\x3e\x5c\x4f\xaa\xfd\x75\x7a\x23\x93\x73\x96\x2d\x49\x92\x11\xc8\x6f\x71\x7e\xbb\xc4\x54\x3b\xcd\xac\x9f\xd4\x3a\xc3\x86\x09\xe3\x62\xc8\x0a\xa0\xc3\x86\x9b\xd4\x3b\xa0\xc7\x1f\x89\x8d\x01\xa8\x55\x15\xb3\x43\xee\x8c\x71\x61\x1d\xbc\xee\xfb\x0e\x52\x2f\xd1\x98\xc3\x5c\x27\x08\xa3\x25\xc8\x8c\xa5\x02\x51\x80\x54\x20\x5c\x6f\x0a\x66\x85\x19\xf9\x98\xa6\x28\x25\xf3\x39\x70\xa0\x12\x5d\x1b\x37\x91\x12\x6e\x83\x90\xcc\xd1\x7e\x25\x67\x06\x19\x7a\x85\xf6\x3e\x8c\x6f\x2e\x3f\xbc\x9f\xec\x1d\x78\xc3\xd0\xdb\x39\xbd\x67\x2c\xe6\xe1\x99\x71\xf7\x0d\x75\xf2\x88\x15\xe0\x3d\x9b\x01\xcc\xb8\xd0\x2d\xd0\xdf\xcc\x27\xb4\x5e\xef\x21\x9c\xdf\xe3\x95\x68\x2d\xe5\x9a\xf0\x67\x1c\x52\xa0\x92\x60\x93\xcf\xb3\xb1\x6a\x0f\x3e\x5a\x7f\x13\x5f\x45\x44\x64\x9e\xd9\x58\xd5\x3b\xc3\xf1\x68\x35\xf6\x5b\xb4\x9d\x1b\x11\x5b\xcd\x1a\x45\x5c\x6b\xdd\x47\x21\x7e\x87\x1f\x86\xa7\x0b\xd8\x43\xcf\xff\xeb\xc5\x8f\x47\xbe\x6f\xd0\x2f\xa4\xa0\x81\xca\xe1\xcd\xaa\x80\x3d\xb4\xa7\xcf\x37\x28\x72\x4c\x28\x4a\x32\xcc\x05\xc8\x57\x1f\x6f\xde\x0e\x7f\xdc\xeb\x2f\x7d\xa5\x0f\x03\xd8\xf3\x93\x48\x2a\xa7\xa4\x73\x6c\x79\xde\xa3\xbf\xa4\xe9\x8f\x25\x4d\x2d\x0d\xd8\x5c\x5f\x98\xc5\xf8\xb0\x72\xf8\x45\x57\x19\xf7\x44\x66\x0e\x12\xeb\x30\x56\xa8\xed\x5c\xf4\x2a\x58\x70\xb8\x63\x30\xdf\x12\xde\xce\x2e\xaa\xa3\x72\xa3\x09\x24\xac\xb5\x50\xfc\xc3\xa7\x8d\xd4\xc9\xd6\x91\xc4\xb5\xaf\x8f\x38\x7c\xf9\xa2\x77\x27\xfd\x6e\x49\x1b\xdf\xb3\x59\x51\x2b\xc9\x44\xc3\xff\xc4\x59\x14\xbf\x19\x07\xff\x10\x99\x0c\xdf\xb7\x75\x9b\xf3\x0a\xb6\x0a\xc6\x69\xd5\x67\xd8\x80\x58\x47\xb0\xd2\x4b\xa9\x70\x1b\x3a\x3b\xc3\xbd\x2e\x8f\x02\x11\x7b\x46\xa4\x0d\xa0\xdf\x5c\x4d\x90\xa0\xc4\x39\x39\xaa\x54\xba\x8a\x06\x7d\x98\x84\xe9\x23\xe0\x68\x59\x0a\x69\x55\xb5\x39\xc0\x20\xf0\x63\x81\x19\x00\xcf\x1a\x3b\x4c\x27\xef\x2f\x6b\x0c\x76\xa7\xa4\xd0\xbb\x07\x59\xc9\x13\x40\xfa\x60\xc7\x39\x53\x24\x11\x39\x0a\x69\x08\x13\x
07\x6c\x27\x4e\x26\x57\x67\xc0\x25\x99\xeb\x3c\xc7\x83\xdf\x56\x17\x7f\xeb\x9c\x17\xf4\xfd\x92\xc0\x7e\x5b\xed\xa8\x77\xe0\xb5\xc9\xd4\xc7\xe6\xa1\xf5\x5a\x1f\xb3\xf7\x28\x35\xf0\x17\xb3\x7f\x13\x66\x7f\xc7\x79\xeb\x11\x23\xf6\x3f\x2f\x67\xef\x4f\x20\x0a\xc1\xb8\x7b\x7c\x77\xfd\xe9\x13\x01\xff\x04\x9d\xb4\x69\x15\xa1\xa7\xe6\x84\x2d\x97\x40\x25\x1a\xbf\x79\x87\x44\x86\xbd\x03\x96\xab\x23\xa0\x13\x69\x33\xae\x84\x9b\xf0\x17\x6a\x16\xd6\x3b\x03\x9a\xb9\x73\x98\xea\xdc\xb3\x04\x82\x13\xcb\x5d\x8d\x3b\xae\x9a\x63\x3f\xc9\x4d\xf5\x26\x2c\xcf\x32\x48\x6e\x45\xb9\x6c\xac\x73\x44\x3e\x4d\x6a\xf9\x41\x5d\x4f\x13\x99\x27\x71\x0d\x9e\x04\xd8\xf4\x3e\xf6\xaf\xc7\xd6\x27\xf3\x6f\xcb\x3c\x3f\xcb\x30\xa1\xbe\xf0\x07\xed\x72\xa7\x72\xf7\xb5\xaf\x49\x49\x0c\x6b\xab\x81\x42\xe2\x42\x1f\xe9\xd4\xf3\xf8\xf1\x41\xbf\xcc\xf4\x0e\x38\x99\xc7\x99\xd2\x1d\x53\x44\xbd\x9b\xf8\x94\x48\xec\x6f\xd6\x0c\x07\xb5\x74\x5f\x4c\x6e\x26\x8d\xa5\xd8\x92\x71\xf0\x1c\xb3\xa2\xab\x59\x83\x89\xe4\x24\x91\xc3\x1b\x8e\xa9\x50\x23\x6c\x38\xb1\xb7\x2b\x1c\xa3\x25\x7e\x18\xe2\x05\x54\x63\xcd\x55\xf4\x0e\x3f\x9c\x2e\xa0\xad\x12\xd4\xb7\x4b\x73\x96\xe6\xa4\x9c\xa5\x6c\x89\x89\xd9\x2f\xe8\x4e\xd8\x9c\x94\xb3\x73\xf3\xb6\x1e\x7d\x11\x1c\x63\x3b\x0c\xb4\x46\xd1\xff\x56\xe0\xad\xc3\x39\xea\x3d\xa6\xbd\xf2\xe5\xb1\xed\xb4\x94\xd9\xe8\xec\xf4\x2d\xc9\xf5\xa1\xa0\x41\xf6\x6b\x6c\xac\x85\x85\xc7\xb0\x9c\x5c\x9c\xb6\xc6\x9c\x39\x5f\xb9\x77\xe8\xf5\x20\xad\x29\x6a\x89\xa7\x91\x30\x5b\x41\x47\x3f\xf6\x60\xfe\xa4\x8b\x9b\x63\x9d\xbb\x70\xa7\x50\xc8\xac\x4b\x46\xfa\x70\x9b\xbd\x8e\x84\xd1\x73\x8d\x61\xab\xc1\x1e\x22\xd1\xa1\xa1\x31\x5e\x34\x86\xbb\x77\x42\xc0\xcb\xff\xfe\x3b\x7a\xf9\xdf\x3f\xa0\x57\x7d\x94\x54\x48\xb6\x56\xe9\x5b\x68\xa5\xce\x34\x69\x5f\xc9\x3c\x2e\xc3\xf9\x31\xdb\x39\x5d\x4d\x5b\x6c\xe9\xf4\xc1\x71\x29\xb3\xb1\x5f\x44\x31\xa8\x67\x27\x68\x9c\x07\xd5\xae\xc8\x6b\x13\x9e\x19\x9d\x16\xc5\x35\x63\xd2\xef\x24\x1d\x3c\x28\x39\x41\xaf\xd0\xe1\x41\x10\xb0\xab\x52\xf4\x5f\x68\xa2\xba\xd0\x35\xba\xeb\x91\xdd\xe4\x1a\x7a\x10\x0d\x2b\xbe\x6a\x32\xa3\x27\xa2\xe8\xbf\xed\x76\x13\xc0\x83\x81\x1f\x2a\x94\x40\x25\x31\xf5\x84\x29\xb1\xde\x61\x12\x2e\xac\x32\x63\xa9\x99\x20\x5a\x29\xa8\x1d\x01\x34\xfd\x34\xfd\xea\x7a\xff\x62\xa3\x6c\x4f\x97\xbd\xb1\xa4\xea\x91\x61\xbc\xc8\x07\xf1\x44\x54\x1b\xf1\x09\x9e\x46\x87\x45\x90\x6d\x1b\x17\xd5\x4f\x24\xb0\xa9\x9f\x47\x47\x37\x2d\xb6\x89\x39\x4d\x2f\x78\x22\x07\xee\x75\x25\xd3\x6e\x5d\xd7\x45\xf3\x80\xeb\x2d\xd8\xd3\x38\xec\xfa\xd1\xcc\xb9\xf2\x9a\x53\xed\x6a\xa9\x4f\x2f\xfb\x5a\x86\x55\x35\xbc\xf3\x3b\x3b\x08\xfa\x3d\x06\xa1\x6a\xea\xd0\xde\xbd\x30\x74\xfb\x3b\xbf\xbe\x3f\x95\x90\xbf\xb5\x77\xaf\x54\xcf\xc0\x84\x52\xdd\x1e\x84\x7a\x5f\x41\x7c\xbc\x99\x83\x60\xea\x6d\x36\xe6\x09\xb3\xba\x23\x63\xb3\x5a\x1f\x85\xb0\x76\x3e\x5f\xe2\x07\x73\x67\x82\x7f\x14\xfd\xa0\x21\x01\xda\x0e\x1a\xbd\x66\xe9\xca\x9a\x21\x83\x96\xe4\xe9\x9d\xf6\x7a\xb2\x8c\xde\xd1\x50\xe3\xea\xb8\xc2\x21\x46\x59\xf4\x3a\x88\x06\x61\xbd\xf7\x41\x78\xd4\xc5\xc6\x85\xd6\x83\xf6\x00\xd6\x6e\x79\x57\x32\x1b\x17\x77\x1d\x9d\xb7\x08\xb6\x54\xf0\x8d\x7d\x10\x45\x54\x67\x07\xab\xc5\xf6\x9e\x03\x0b\x50\x51\xfb\x49\x0a\xff\xae\x95\xc7\xdc\xd8\xd2\x85\xe3\xa4\x6d\xcf\x46\x58\x17\x9d\x42\xdc\xa1\x06\xee\x80\x0d\xbd\xdc\x6b\x99\xc0\x95\x61\xab\xb3\x05\xfd\xf9\x3a\x32\x05\xec\x2a\xbb\xe5\xf8\x15\xda\x5f\x80\xb4\xa9\x1b\x97\xd4\xe4\xd6\x37\x66\xfb\x91\xfd\x58\x1f\x93\x10\x60\x3a\x7c\xaa\x71\x29\x0d\x8f\x53\x2c\xb1\x32\x53\xa5\x5e\xc1\x2b\xeb\xd5\x95\x5e\xba\xaf\x0d\x1b\xb5\x6a\x35\x6d\xa6\x9f\x0c\x2c\x81\x8d\x94\xa8\x41\x64\xc2\xf5\x53\x4c\xfc\x72\xd7\x65\xde\
x55\x24\x38\xff\xbf\x2a\x62\x53\x4f\xe2\xdc\x52\x0b\x16\xc6\x23\xab\x91\xb7\xca\x02\x9f\x4c\xae\x2a\x6d\xf6\x08\x23\xac\x85\xcc\xc3\x73\xd0\xe2\xf4\x0e\x02\x6a\xec\x7d\x7d\xa4\x2c\x75\x11\x00\x41\x5c\x4a\x86\x7d\xf4\xe2\x2a\x72\x8c\xec\x2b\x9d\x89\x12\x2e\xb7\x50\x73\x57\xa4\x4b\x56\x09\xce\x58\xed\xd6\xd1\xa1\x38\x44\x25\x3b\x3a\xfc\x82\x8b\xfc\x7c\x78\xef\xb2\xbe\xfa\x60\xae\xc8\xc7\x29\x2f\x73\xd0\xe3\x30\xbc\x01\xcf\x41\xf8\xff\x8f\x12\x46\xe7\x31\x15\x1b\x10\xf6\xe1\xe7\xd3\xc9\xf8\x8c\x71\x50\x52\xd4\xda\x88\xb3\xb9\x7a\x76\x8f\x45\x31\xf4\xe0\x86\x89\xdb\xef\x36\xdc\x48\x4e\x64\xe3\x4e\x2f\x53\xb7\x38\x88\xa5\x59\x60\xcb\x03\x59\x42\x89\xfa\x15\xe7\xaa\xf4\x88\xdc\xcb\xa3\xbf\x7d\x85\x38\xb5\x0e\xc7\x0c\xb5\xf7\x47\x01\x63\xc6\xe5\x25\xbd\xae\x4e\xa0\xd8\x78\x5e\x63\x93\x4b\x1b\xcd\x05\x64\x8e\xd5\xd4\xc1\x36\xc7\xef\x7a\xdb\x64\x63\xa5\xdf\x28\xa3\x3e\x4d\x1b\x57\x61\xa1\xd0\xde\x38\xe9\x2c\xa0\xe6\x19\xf5\x4e\x43\x4f\xcd\xd1\x6a\xf5\x29\x49\xf6\x44\x42\x90\xf6\x4b\x80\xa7\x4e\xfc\xa8\x9e\x09\xc8\xe1\x99\xc5\xe2\xa1\x0c\x85\x72\x58\xe5\xa7\xa7\x0f\xcf\xd0\x6e\x4e\xcc\xa9\x21\x95\x8d\x78\x6d\x93\x6b\x5d\x7a\x48\x9f\x04\x9a\xd2\xad\xf7\xc3\x47\x0b\xfe\x66\x03\x66\x42\x16\x94\xd0\x8f\xd7\x57\xad\x8e\xf3\x9d\x04\x47\xcf\x8d\xc5\x57\xb5\x46\x15\x53\xeb\x93\x4d\x58\xb7\x5d\x11\xe9\x29\x92\x98\xab\x16\x2b\xc6\x54\x93\x24\xf6\xcf\x89\xa9\xcf\x86\x79\x86\x12\x0e\xea\x43\xe3\x34\x98\x26\x1f\xf5\x31\x3f\xae\x1f\xaa\xc3\x5b\x7a\xb9\xef\xbc\x08\x1a\x50\x9f\x61\x64\xb0\x74\xe1\xef\x5e\xac\x84\x43\xee\x35\x16\x24\x39\x27\x0b\x10\xd2\xb0\x49\x69\x39\x88\x75\xa9\x75\xe0\x77\x96\x34\xe7\x06\xce\xd4\xdb\x41\x6b\x5b\xa6\x12\x52\xfd\x29\x58\x4b\x87\x48\xae\x01\xe7\xcb\xf6\x0a\xa0\x2e\x3f\x2d\x05\xf0\xa9\xbb\x83\xb4\x1b\xd1\x5b\x92\xc7\xd7\xbe\xb1\x4d\xa3\x0a\x7b\xaa\xcb\xfe\x0a\xf2\x0c\x82\x5f\x4d\x5f\x7b\x38\xb5\xcc\x29\x85\x80\x71\xf2\xd9\x48\xcb\xa0\x6d\x7e\x6c\xd3\xf1\x75\xb2\xa9\xfe\x37\x12\x99\xb4\xa5\xbc\x3d\x9f\x66\xab\x67\x8f\x9c\x3e\x72\xd8\x3b\x35\x3f\x8a\x0d\xf6\xca\xd9\xf4\xff\x9e\xfa\x16\x42\xcf\xac\x14\xf8\x9d\x2c\xee\x2a\xbf\x3c\xfa\x35\xa6\x10\xb6\x6a\x56\x9f\xbd\xe8\x3f\x7f\x5c\xdb\x31\xa8\xf7\xdb\xd8\x91\xe6\x34\xa9\x55\x75\x80\x41\x29\xc0\x18\x61\x08\x0b\xe3\x60\x91\xac\xda\x0a\xf5\x0c\xcd\x4a\x69\xcf\xf1\x65\x77\xc0\x39\x49\xfd\x03\xaf\xb6\x90\x20\x67\x59\x7d\x52\x55\x74\xf8\xbc\xbc\x91\xd3\xe1\xe1\x69\xf6\x51\x03\x67\xb4\x8b\x62\x4a\x64\xdb\xba\x82\x15\x7a\x27\x53\x83\x15\xcb\x58\xd9\xd7\x6a\x2a\x82\x07\xc9\x71\xa2\xe3\x8a\xc6\xc5\xef\x47\x20\x24\xd3\x30\x36\x03\x7e\x03\x13\x1f\x17\x2b\xa9\x71\x74\xc6\x49\xb0\x10\xea\xdd\x0d\xab\xae\x7b\xda\xc4\x23\x21\xf2\xa1\x69\xc6\x50\x35\xa3\xe2\x91\x17\x60\x31\x37\x4c\x99\x18\xe0\xd7\x75\x44\x47\x25\xdd\x7a\x73\x5b\x6c\x7e\x28\xd0\x27\xd9\xbc\xdf\xe0\x13\xf3\xf0\xa4\xde\x56\x2a\x1f\x8f\x98\xa6\x91\x63\x0a\xbe\x61\x93\xb7\x6d\xdc\x63\xca\xf9\x8d\xd9\x72\x6a\xda\x41\xda\x06\x42\xf7\x30\x13\xe6\xa2\x23\xef\x4a\xed\xad\x3d\x8b\xfe\xf3\xd1\xde\xaf\xe3\xf1\xd5\xbf\x8b\x67\x7b\x77\xa5\xff\x78\x17\x35\x39\xa4\xed\x2b\x7d\x36\x6d\x43\xfa\x65\xa8\xd6\x10\xc3\xcb\x71\x13\xb7\x7f\x80\x63\xaf\xa9\xbb\xe1\x6a\x8a\x9e\x7a\x9b\xb7\x84\xbb\x7a\x63\x17\x48\x7c\x9d\xcc\x75\xd5\xd0\xd7\xb2\x6d\x06\x9c\x8f\xd7\xd3\xa9\xbd\x7a\xb4\x17\xc9\xb8\xbe\x83\xb2\xbe\xeb\xe4\x31\xe5\xf5\x8d\x3f\x5e\xf9\x9e\x28\x44\x04\x51\x34\x3a\xf2\x08\x67\x7a\x47\x44\x24\x4a\x4a\xd7\x24\x52\x5d\xf2\x15\x74\xda\xd6\xb4\x37\xbb\x3a\x7e\xe7\x94\x13\xd9\x8e\xdb\xde\x03\xda\x96\x44\x92\x85\x9a\xc3\x2e\x6e\x6e\xc6\xec\x61\x85\x3e\x95\x39\x05\x8e\x67\x24\x27\x72\x15\x00\x3b\x77\xd7\xfd\xfd\xfd
\xc8\x9c\x3b\x94\xb0\xe5\xe1\x2c\x67\x8b\x43\x8b\x87\xd0\xc5\x50\x66\x30\xd4\x27\x80\x3f\xac\x86\x77\x3e\xb6\xe1\x3d\x91\x99\x3d\xe7\xa8\xbf\xc9\x26\x85\xaa\xf5\xb4\x82\x73\xad\xdb\x2a\x25\x43\xee\xce\x4b\x33\x5d\x76\x98\x74\xde\xd5\x95\x79\xde\xb7\xe9\x32\x4a\xde\x97\xc8\x8d\x96\x15\x8d\x5f\x9a\x37\x5b\x7a\x75\x77\x07\xc6\xac\x42\x8b\xde\x18\x5d\x15\x0f\x97\xb3\x56\x31\xb6\x2e\xc4\x0e\xc9\xa6\x69\x0f\xde\x38\xea\x09\xd0\x74\x03\x5e\x0e\xf8\xf1\x78\xaf\x01\xa7\xed\xfb\xae\xbd\x02\xd6\x73\xbc\xdf\x30\x79\x4d\x59\xfd\xd7\x19\x10\x3a\x90\x35\xb0\xab\xdd\xc1\xc1\xb6\x05\xd8\x7c\x3e\x68\xfb\x8a\x5d\x83\x9a\x77\xc7\x6c\xd5\xa0\x76\x1d\x5b\x1a\xbf\xdf\xb8\xce\x8d\x70\x37\xec\x51\x66\xaf\x31\x5a\xfa\x92\xb5\x3a\x82\xec\x7d\xd6\x8e\x79\xa2\x96\x99\x6f\xf0\x44\x57\x3d\xfe\x8d\xee\xf1\xe7\xe5\x57\x62\xae\xd2\x07\x3a\x9b\x1c\xc3\x6b\xe3\xb2\xaf\xab\x42\xed\x40\x88\xbd\xfc\xdb\xbf\xdb\x37\xfa\xe8\x0b\x7f\xa3\x4a\x81\xdd\x12\x98\x9a\x0c\xaf\x68\xc9\xa8\x52\x50\x85\x4c\xfe\x57\x47\xcc\xd0\xe2\xd5\x86\x7d\xfc\xe9\xc4\xdb\x3c\x76\xc9\x3d\x3b\xe8\x92\xea\xcb\x30\x11\x9b\x23\xbb\x35\x5c\xf2\x95\x9e\xf2\x28\x3c\x48\xef\x1a\x0b\x13\x84\x99\xc1\x9c\x71\xb0\xfe\x03\xc5\x3c\x4c\x4d\xb9\x08\xb5\x0a\x41\x15\xe0\xeb\xa2\x56\x4f\x83\xef\xe1\xc1\xbb\xf1\x36\xa0\xbf\xf9\xd1\xcd\x91\xd7\x20\xf9\xea\x3d\xa3\x97\x29\x2c\x0b\x26\x5d\xea\x56\xdb\xb4\x76\x9b\xa7\x19\xcd\x57\xe8\x9e\xf1\x5b\xe1\x7c\x94\x89\xc9\x1f\x41\x44\x68\x2b\x2f\x61\xcb\x82\x83\x10\x90\x76\x19\xd4\x91\x1c\x9d\x34\x7d\x8d\x05\x18\x9f\xc8\x57\xad\x91\x4f\xf5\x06\xf8\xa1\x3e\x87\xc5\x49\xf0\xd6\x0e\xaa\xc3\xa7\xe8\x34\xd5\x77\x44\xfb\x1b\x9b\x9b\xe9\xb3\x66\x63\x61\xbc\x4d\xbe\x73\xcb\x2b\xe3\xe5\x6a\x6d\x8a\x18\xd8\xde\x70\xa1\x99\x58\x2a\x18\xfa\x86\xa7\x66\xa2\x7a\x3b\x64\xac\xde\xad\x1d\x6c\xc6\x51\x7d\x0f\x08\x73\x40\x6a\x9e\x75\x57\xd6\xba\x80\x85\x64\x08\xbb\x9b\x92\x9c\x67\xc6\x3a\x09\x9e\xe9\x72\x69\xda\x3c\x04\xdd\x2d\x63\x3a\x44\x67\x3f\xc3\x62\xcc\x61\x4e\x1e\x1a\x2e\x4e\x8d\x10\x0d\x4c\x45\xe1\x7e\xfc\xe1\x60\xb3\x7f\xc6\x3c\x91\x43\x1a\xfe\x1e\x06\x9c\x7e\xd3\x93\x19\xfe\x24\xa7\x2a\x74\xc8\xc6\xb6\xbe\x33\xdb\x5f\x31\x2f\x8f\x56\x63\x26\xdf\x5f\xe7\x75\x6c\x9f\xb2\xb0\xcd\x92\x63\x07\xbd\x67\x08\xaa\x13\x86\xea\xeb\xcc\xe7\x8c\xfb\x62\xdb\x28\x64\x7d\xbc\x2d\x41\xe8\x8e\xbd\x46\x6b\xae\xf2\x22\x53\xa0\x04\xd2\x11\xba\x06\x2c\x18\x3d\x6e\xea\x8f\x73\xfd\x31\x2c\xbd\x3d\x09\xcd\x34\xc6\x78\x66\x63\xc7\x6e\x88\x66\x7e\xb2\x39\x1e\x14\xe9\xc3\x69\xf5\xd6\x82\x24\x67\x65\xaa\x44\xe6\x8e\xe8\x01\x6a\x47\xac\x66\x5b\x69\x26\xbe\xde\xbd\x89\xd1\xd4\x49\x07\x7c\xf1\xdb\x1c\x46\x6a\xe3\xad\xc1\xb6\x09\xb3\x9d\xdc\xdc\x44\xa6\x44\x54\x9f\xb5\x4b\x04\x9a\x81\xd2\x5e\x4b\x46\x89\x64\x1c\x52\x0f\x8b\xde\x6e\xae\x66\x68\x1b\xd7\x43\x82\xcd\xe5\xbd\xd2\x79\xfb\x39\xb9\x05\x24\x56\x22\x25\x8b\x83\x76\xfb\xfa\x0e\x41\x35\xde\xe7\xe7\x2f\xfe\x6b\x74\x34\x3a\x1a\x3d\xef\x74\xbe\x84\x1b\xb8\x4c\xb9\xe3\xe3\xf0\x16\x0c\xf7\xa4\xa0\xa6\xb1\x3c\x3c\xc0\xe6\x6b\x0e\x28\x8d\x4b\x53\xfd\xef\xff\x0f\x00\x00\xff\xff\x2e\xe3\xb9\xdd\x59\x92\x00\x00") - -func etcNginxTemplateNginxTmplBytes() ([]byte, error) { - return bindataRead( - _etcNginxTemplateNginxTmpl, - "etc/nginx/template/nginx.tmpl", - ) -} - -func etcNginxTemplateNginxTmpl() (*asset, error) { - bytes, err := etcNginxTemplateNginxTmplBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "etc/nginx/template/nginx.tmpl", size: 37465, mode: os.FileMode(420), modTime: time.Unix(1511380479, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var 
_ingressControllerCleanNginxConfSh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x92\x41\x6b\xe3\x48\x10\x85\xef\xfa\x15\x6f\x2d\x83\x77\x17\x47\x9d\x78\x0f\x0b\xc9\xc9\x9b\x64\x19\x91\x60\x43\xe4\x4c\x08\x98\x40\xbb\x55\x96\x0a\xa4\x6e\x4d\x75\x2b\xb6\x99\x99\xff\x3e\xb4\xe2\xcc\xc4\x8c\x8e\x55\x4f\xfd\xbe\x7a\x55\xe9\x1f\x6a\xc3\x56\x6d\xb4\xaf\x93\x24\xc5\xb5\xeb\x0e\xc2\x55\x1d\x30\x3b\xbf\xf8\x17\xab\x9a\x70\xd7\x6f\x48\x2c\x05\xf2\x98\xf7\xa1\x76\xe2\xb3\x24\x4d\x52\xdc\xb3\x21\xeb\xa9\x44\x6f\x4b\x12\x84\x9a\x30\xef\xb4\xa9\xe9\xbd\x33\xc5\x67\x12\xcf\xce\x62\x96\x9d\xe3\xcf\x28\x18\x1d\x5b\xa3\xbf\xae\x92\x14\x07\xd7\xa3\xd5\x07\x58\x17\xd0\x7b\x42\xa8\xd9\x63\xcb\x0d\x81\xf6\x86\xba\x00\xb6\x30\xae\xed\x1a\xd6\xd6\x10\x76\x1c\xea\xc1\xe6\xf8\x48\x96\xa4\x78\x3e\x3e\xe1\x36\x41\xb3\x85\x86\x71\xdd\x01\x6e\xfb\x51\x07\x1d\x06\xe0\xf8\xd5\x21\x74\x97\x4a\xed\x76\xbb\x4c\x0f\xb0\x99\x93\x4a\x35\x6f\x42\xaf\xee\xf3\xeb\xdb\x45\x71\x7b\x36\xcb\xce\x87\x5f\x1e\x6d\x43\xde\x43\xe8\x4b\xcf\x42\x25\x36\x07\xe8\xae\x6b\xd8\xe8\x4d\x43\x68\xf4\x0e\x4e\xa0\x2b\x21\x2a\x11\x5c\xe4\xdd\x09\x07\xb6\xd5\x14\xde\x6d\xc3\x4e\x0b\x25\x29\x4a\xf6\x41\x78\xd3\x87\x93\xb0\xde\xe9\xd8\x9f\x08\x9c\x85\xb6\x18\xcd\x0b\xe4\xc5\x08\xff\xcd\x8b\xbc\x98\x26\x29\x9e\xf2\xd5\xa7\xe5\xe3\x0a\x4f\xf3\x87\x87\xf9\x62\x95\xdf\x16\x58\x3e\xe0\x7a\xb9\xb8\xc9\x57\xf9\x72\x51\x60\xf9\x3f\xe6\x8b\x67\xdc\xe5\x8b\x9b\x29\x88\x43\x4d\x02\xda\x77\x12\xf9\x9d\x80\x63\x8c\x54\xc6\xcc\x0a\xa2\x13\x80\xad\x7b\x03\xf2\x1d\x19\xde\xb2\x41\xa3\x6d\xd5\xeb\x8a\x50\xb9\x57\x12\xcb\xb6\x42\x47\xd2\xb2\x8f\xcb\xf4\xd0\xb6\x4c\x52\x34\xdc\x72\xd0\x61\xa8\xfc\x36\x54\x16\x6f\x69\x15\xd7\xe9\x8d\x70\x17\x20\xd4\xba\x57\xf2\x30\xce\x7a\x32\x7d\xe0\x57\x02\xb5\x5d\x38\xa0\x61\x4b\x3e\x26\x67\x2b\xb6\xfb\xcc\x38\xbb\x8d\xc1\xfb\xe8\x1a\xaf\x8b\x3d\x5a\x27\x04\x1f\x27\x88\xe0\xda\xa2\x1f\xba\x1a\x95\x83\x50\x45\xfb\x64\x98\xaa\x8c\xc7\xd2\x6a\x5b\xfa\xcb\x24\xc5\x45\x76\x34\x1d\xb8\x84\x42\x2f\x16\x46\x8b\x30\x09\x4c\xad\x45\x9b\x40\xa2\x7c\x92\x62\xf6\x53\xfa\x01\x29\x49\xf1\x4f\xac\x77\x8d\x36\x84\xb6\x6f\x02\x47\xff\x8f\x8a\x88\x77\x46\x98\x78\xb5\x16\xa5\xaa\x09\xbe\xe1\x57\xe9\x05\xf8\x7b\xac\xd6\x93\xf1\x64\x6d\x4f\x7b\xea\x65\xac\xbe\x2e\xae\xd4\xcb\xda\x8e\xd5\xcd\xd5\xf7\x49\xf2\x23\x00\x00\xff\xff\x3b\xba\x15\x8d\x85\x03\x00\x00") - -func ingressControllerCleanNginxConfShBytes() ([]byte, error) { - return bindataRead( - _ingressControllerCleanNginxConfSh, - "ingress-controller/clean-nginx-conf.sh", - ) -} - -func ingressControllerCleanNginxConfSh() (*asset, error) { - bytes, err := ingressControllerCleanNginxConfShBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "ingress-controller/clean-nginx-conf.sh", size: 901, mode: os.FileMode(493), modTime: time.Unix(1509931394, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. 
-func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "etc/nginx/nginx.conf": etcNginxNginxConf, - "etc/nginx/template/nginx.tmpl": etcNginxTemplateNginxTmpl, - "ingress-controller/clean-nginx-conf.sh": ingressControllerCleanNginxConfSh, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "etc": &bintree{nil, map[string]*bintree{ - "nginx": &bintree{nil, map[string]*bintree{ - "nginx.conf": &bintree{etcNginxNginxConf, map[string]*bintree{}}, - "template": &bintree{nil, map[string]*bintree{ - "nginx.tmpl": &bintree{etcNginxTemplateNginxTmpl, map[string]*bintree{}}, - }}, - }}, - }}, - "ingress-controller": &bintree{nil, map[string]*bintree{ - "clean-nginx-conf.sh": &bintree{ingressControllerCleanNginxConfSh, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err 
:= AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} - diff --git a/internal/file/file.go b/internal/file/file.go index 25887c52bf..9939df944d 100644 --- a/internal/file/file.go +++ b/internal/file/file.go @@ -20,6 +20,7 @@ import ( "crypto/sha1" "encoding/hex" "io/ioutil" + "k8s.io/klog" ) // SHA1 returns the SHA1 of a file. @@ -27,6 +28,7 @@ func SHA1(filename string) string { hasher := sha1.New() s, err := ioutil.ReadFile(filename) if err != nil { + klog.Errorf("Error reading file %v", err) return "" } diff --git a/internal/file/filesystem.go b/internal/file/filesystem.go index 1adaba8ec1..4a66ed263a 100644 --- a/internal/file/filesystem.go +++ b/internal/file/filesystem.go @@ -1,14 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package file import ( + "fmt" "os" "path/filepath" - - "github.com/golang/glog" + "strings" "k8s.io/kubernetes/pkg/util/filesystem" ) +// ReadWriteByUser defines the linux file permission 0660: the owner user and group may read and write +const ReadWriteByUser = 0660 + +// ReadByUserGroup defines the linux file permission 0640: the owner user may read and write, the group may only read +const ReadByUserGroup = 0640 + // Filesystem is an interface that we can use to mock various filesystem operations type Filesystem interface { filesystem.Filesystem @@ -18,110 +40,68 @@ type Filesystem interface { func NewLocalFS() (Filesystem, error) { fs := filesystem.DefaultFs{} - err := initialize(false, fs) - if err != nil { - return nil, err + for _, directory := range directories { + err := fs.MkdirAll(directory, ReadWriteByUser) + if err != nil { + return nil, err + } } return fs, nil } -// NewFakeFS creates an in-memory filesytem with all the required +// NewFakeFS creates an in-memory filesystem with all the required // paths used by the ingress controller. // This allows running tests without polluting the local machine.
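// The fake filesystem is seeded by walking the controller's rootfs/ directory (resolved from GOPATH) and copying each regular file into the in-memory filesystem, so tests observe the same file layout that ships in the image.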
func NewFakeFS() (Filesystem, error) { - fs := filesystem.NewFakeFs() - - err := initialize(true, fs) - if err != nil { - return nil, err - } + osFs := filesystem.DefaultFs{} + fakeFs := filesystem.NewFakeFs() - return fs, nil -} + //TODO: find another way to do this + rootFS := filepath.Clean(fmt.Sprintf("%v/%v", os.Getenv("GOPATH"), "src/k8s.io/ingress-nginx/rootfs")) -// initialize creates the required directory structure and when -// runs as virtual filesystem it copies the local files to it -func initialize(isVirtual bool, fs Filesystem) error { - for _, directory := range directories { - err := fs.MkdirAll(directory, 0655) + var fileList []string + err := filepath.Walk(rootFS, func(path string, f os.FileInfo, err error) error { if err != nil { return err } - } - - if !isVirtual { - return nil - } - for _, file := range files { - f, err := fs.Create(file) - if err != nil { - return err + if f.IsDir() { + return nil } - _, err = f.Write([]byte("")) - if err != nil { - return err + file := strings.TrimPrefix(path, rootFS) + if file == "" { + return nil } - err = f.Close() - if err != nil { - return err - } - } + fileList = append(fileList, file) + + return nil + }) - err := fs.MkdirAll("/proc", 0655) if err != nil { - return err + return nil, err } - glog.Info("Restoring generated (go-bindata) assets in virtual filesystem...") - for _, assetName := range AssetNames() { - err := restoreAsset("/", assetName, fs) + for _, file := range fileList { + realPath := fmt.Sprintf("%v%v", rootFS, file) + + data, err := osFs.ReadFile(realPath) if err != nil { - return err + return nil, err } - } - return nil -} - -// restoreAsset restores an asset under the given directory -func restoreAsset(dir, name string, fs Filesystem) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = fs.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - - f, err := fs.Create(_filePath(dir, name)) - if err != nil { - return err - } - - _, err = f.Write(data) - if err != nil { - return err - } + fakeFile, err := fakeFs.Create(file) + if err != nil { + return nil, err + } - err = f.Close() - if err != nil { - return err + _, err = fakeFile.Write(data) + if err != nil { + return nil, err + } } - //Missing info.Mode() - - err = fs.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil + return fakeFs, nil } diff --git a/internal/file/structure.go b/internal/file/structure.go index aa4cd74ddb..acb52ffd82 100644 --- a/internal/file/structure.go +++ b/internal/file/structure.go @@ -1,3 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package file const ( @@ -9,18 +25,12 @@ const ( // This directory contains all the SSL certificates that are specified in Ingress rules. // The name of each file is <namespace>-<secret name>.pem. The content is the concatenated // certificate and key.
- DefaultSSLDirectory = "/ingress-controller/ssl" + DefaultSSLDirectory = "/etc/ingress-controller/ssl" ) var ( directories = []string{ - "/etc/nginx/template", - "/run", DefaultSSLDirectory, AuthDirectory, } - - files = []string{ - "/run/nginx.pid", - } ) diff --git a/internal/ingress/annotations/alias/main.go b/internal/ingress/annotations/alias/main.go index 449d64b111..fb08e73f29 100644 --- a/internal/ingress/annotations/alias/main.go +++ b/internal/ingress/annotations/alias/main.go @@ -17,7 +17,7 @@ limitations under the License. package alias import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -34,6 +34,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress rule // used to add an alias to the provided hosts -func (a alias) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a alias) Parse(ing *networking.Ingress) (interface{}, error) { return parser.GetStringAnnotation("server-alias", ing) } diff --git a/internal/ingress/annotations/alias/main_test.go b/internal/ingress/annotations/alias/main_test.go index b1dba57c71..6eeeb25d16 100644 --- a/internal/ingress/annotations/alias/main_test.go +++ b/internal/ingress/annotations/alias/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -46,12 +46,12 @@ func TestParse(t *testing.T) { {nil, ""}, } - ing := &extensions.Ingress{ + ing := &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{}, + Spec: networking.IngressSpec{}, } for _, testCase := range testCases { diff --git a/internal/ingress/annotations/annotations.go b/internal/ingress/annotations/annotations.go index 2bbd8814d1..00080c8400 100644 --- a/internal/ingress/annotations/annotations.go +++ b/internal/ingress/annotations/annotations.go @@ -17,27 +17,40 @@ limitations under the License. 
package annotations import ( - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/ingress-nginx/internal/ingress/annotations/canary" + "k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity" + "k8s.io/ingress-nginx/internal/ingress/annotations/sslcipher" + "k8s.io/klog" - extensions "k8s.io/api/extensions/v1beta1" + apiv1 "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/alias" "k8s.io/ingress-nginx/internal/ingress/annotations/auth" "k8s.io/ingress-nginx/internal/ingress/annotations/authreq" + "k8s.io/ingress-nginx/internal/ingress/annotations/authreqglobal" "k8s.io/ingress-nginx/internal/ingress/annotations/authtls" + "k8s.io/ingress-nginx/internal/ingress/annotations/backendprotocol" "k8s.io/ingress-nginx/internal/ingress/annotations/clientbodybuffersize" + "k8s.io/ingress-nginx/internal/ingress/annotations/connection" "k8s.io/ingress-nginx/internal/ingress/annotations/cors" + "k8s.io/ingress-nginx/internal/ingress/annotations/customhttperrors" "k8s.io/ingress-nginx/internal/ingress/annotations/defaultbackend" - "k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck" + "k8s.io/ingress-nginx/internal/ingress/annotations/http2pushpreload" + "k8s.io/ingress-nginx/internal/ingress/annotations/influxdb" "k8s.io/ingress-nginx/internal/ingress/annotations/ipwhitelist" + "k8s.io/ingress-nginx/internal/ingress/annotations/loadbalancing" + "k8s.io/ingress-nginx/internal/ingress/annotations/log" + "k8s.io/ingress-nginx/internal/ingress/annotations/luarestywaf" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/annotations/portinredirect" "k8s.io/ingress-nginx/internal/ingress/annotations/proxy" "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/redirect" "k8s.io/ingress-nginx/internal/ingress/annotations/rewrite" + "k8s.io/ingress-nginx/internal/ingress/annotations/satisfy" "k8s.io/ingress-nginx/internal/ingress/annotations/secureupstream" "k8s.io/ingress-nginx/internal/ingress/annotations/serversnippet" "k8s.io/ingress-nginx/internal/ingress/annotations/serviceupstream" @@ -46,7 +59,7 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/sslpassthrough" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamhashby" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamvhost" - "k8s.io/ingress-nginx/internal/ingress/annotations/vtsfilterkey" + "k8s.io/ingress-nginx/internal/ingress/annotations/xforwardedprefix" "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" ) @@ -57,30 +70,43 @@ const DeniedKeyName = "Denied" // Ingress defines the valid annotations present in one NGINX Ingress rule type Ingress struct { metav1.ObjectMeta + BackendProtocol string Alias string BasicDigestAuth auth.Config + Canary canary.Config CertificateAuth authtls.Config ClientBodyBufferSize string ConfigurationSnippet string + Connection connection.Config CorsConfig cors.Config - DefaultBackend string - Denied error - ExternalAuth authreq.Config - HealthCheck healthcheck.Config - Proxy proxy.Config - RateLimit ratelimit.Config - Redirect redirect.Config - Rewrite rewrite.Config - SecureUpstream secureupstream.Config - ServerSnippet string - ServiceUpstream bool - SessionAffinity sessionaffinity.Config - SSLPassthrough bool - UsePortInRedirects bool - UpstreamHashBy string - UpstreamVhost string - 
VtsFilterKey string - Whitelist ipwhitelist.SourceRange + CustomHTTPErrors []int + DefaultBackend *apiv1.Service + //TODO: Change this back into an error when https://github.com/imdario/mergo/issues/100 is resolved + Denied *string + ExternalAuth authreq.Config + EnableGlobalAuth bool + HTTP2PushPreload bool + Proxy proxy.Config + RateLimit ratelimit.Config + Redirect redirect.Config + Rewrite rewrite.Config + Satisfy string + SecureUpstream secureupstream.Config + ServerSnippet string + ServiceUpstream bool + SessionAffinity sessionaffinity.Config + SSLPassthrough bool + UsePortInRedirects bool + UpstreamHashBy upstreamhashby.Config + LoadBalancing string + UpstreamVhost string + Whitelist ipwhitelist.SourceRange + XForwardedPrefix string + SSLCiphers string + Logs log.Config + LuaRestyWAF luarestywaf.Config + InfluxDB influxdb.Config + ModSecurity modsecurity.Config } // Extractor defines the annotation parsers to be used in the extraction of annotations @@ -94,17 +120,22 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor { map[string]parser.IngressAnnotation{ "Alias": alias.NewParser(cfg), "BasicDigestAuth": auth.NewParser(auth.AuthDirectory, cfg), + "Canary": canary.NewParser(cfg), "CertificateAuth": authtls.NewParser(cfg), "ClientBodyBufferSize": clientbodybuffersize.NewParser(cfg), "ConfigurationSnippet": snippet.NewParser(cfg), + "Connection": connection.NewParser(cfg), "CorsConfig": cors.NewParser(cfg), + "CustomHTTPErrors": customhttperrors.NewParser(cfg), "DefaultBackend": defaultbackend.NewParser(cfg), "ExternalAuth": authreq.NewParser(cfg), - "HealthCheck": healthcheck.NewParser(cfg), + "EnableGlobalAuth": authreqglobal.NewParser(cfg), + "HTTP2PushPreload": http2pushpreload.NewParser(cfg), "Proxy": proxy.NewParser(cfg), "RateLimit": ratelimit.NewParser(cfg), "Redirect": redirect.NewParser(cfg), "Rewrite": rewrite.NewParser(cfg), + "Satisfy": satisfy.NewParser(cfg), "SecureUpstream": secureupstream.NewParser(cfg), "ServerSnippet": serversnippet.NewParser(cfg), "ServiceUpstream": serviceupstream.NewParser(cfg), @@ -112,15 +143,22 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor { "SSLPassthrough": sslpassthrough.NewParser(cfg), "UsePortInRedirects": portinredirect.NewParser(cfg), "UpstreamHashBy": upstreamhashby.NewParser(cfg), + "LoadBalancing": loadbalancing.NewParser(cfg), "UpstreamVhost": upstreamvhost.NewParser(cfg), - "VtsFilterKey": vtsfilterkey.NewParser(cfg), "Whitelist": ipwhitelist.NewParser(cfg), + "XForwardedPrefix": xforwardedprefix.NewParser(cfg), + "SSLCiphers": sslcipher.NewParser(cfg), + "Logs": log.NewParser(cfg), + "LuaRestyWAF": luarestywaf.NewParser(cfg), + "InfluxDB": influxdb.NewParser(cfg), + "BackendProtocol": backendprotocol.NewParser(cfg), + "ModSecurity": modsecurity.NewParser(cfg), }, } } // Extract extracts the annotations from an Ingress -func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { +func (e Extractor) Extract(ing *networking.Ingress) *Ingress { pia := &Ingress{ ObjectMeta: ing.ObjectMeta, } @@ -128,7 +166,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { data := make(map[string]interface{}) for name, annotationParser := range e.annotations { val, err := annotationParser.Parse(ing) - glog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) + klog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) if err != nil { if errors.IsMissingAnnotations(err) { continue @@ -138,14 +176,23 @@ func (e 
Extractor) Extract(ing *extensions.Ingress) *Ingress { continue } + if name == "CertificateAuth" && data[name] == nil { + data[name] = authtls.Config{ + AuthTLSError: err.Error(), + } + // avoid mapping the result from the annotation + val = nil + } + _, alreadyDenied := data[DeniedKeyName] if !alreadyDenied { - data[DeniedKeyName] = err - glog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) + errString := err.Error() + data[DeniedKeyName] = &errString + klog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) continue } - glog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) + klog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) } if val != nil { @@ -155,7 +202,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { err := mergo.MapWithOverwrite(pia, data) if err != nil { - glog.Errorf("unexpected error merging extracted annotations: %v", err) + klog.Errorf("unexpected error merging extracted annotations: %v", err) } return pia diff --git a/internal/ingress/annotations/annotations_test.go b/internal/ingress/annotations/annotations_test.go index 4657de41e9..2bd66aafea 100644 --- a/internal/ingress/annotations/annotations_test.go +++ b/internal/ingress/annotations/annotations_test.go @@ -20,7 +20,7 @@ import ( "testing" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -30,22 +30,19 @@ import ( ) var ( - annotationSecureUpstream = parser.GetAnnotationWithPrefix("secure-backends") annotationSecureVerifyCACert = parser.GetAnnotationWithPrefix("secure-verify-ca-secret") - annotationUpsMaxFails = parser.GetAnnotationWithPrefix("upstream-max-fails") - annotationUpsFailTimeout = parser.GetAnnotationWithPrefix("upstream-fail-timeout") annotationPassthrough = parser.GetAnnotationWithPrefix("ssl-passthrough") annotationAffinityType = parser.GetAnnotationWithPrefix("affinity") annotationCorsEnabled = parser.GetAnnotationWithPrefix("enable-cors") - annotationCorsAllowOrigin = parser.GetAnnotationWithPrefix("cors-allow-origin") annotationCorsAllowMethods = parser.GetAnnotationWithPrefix("cors-allow-methods") annotationCorsAllowHeaders = parser.GetAnnotationWithPrefix("cors-allow-headers") annotationCorsAllowCredentials = parser.GetAnnotationWithPrefix("cors-allow-credentials") + backendProtocol = parser.GetAnnotationWithPrefix("backend-protocol") defaultCorsMethods = "GET, PUT, POST, DELETE, PATCH, OPTIONS" defaultCorsHeaders = "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization" annotationAffinityCookieName = parser.GetAnnotationWithPrefix("session-cookie-name") - annotationAffinityCookieHash = parser.GetAnnotationWithPrefix("session-cookie-hash") annotationUpstreamHashBy = parser.GetAnnotationWithPrefix("upstream-hash-by") + annotationCustomHTTPErrors = parser.GetAnnotationWithPrefix("custom-http-errors") ) type mockCfg struct { @@ -77,28 +74,28 @@ func (m mockCfg) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) return nil, nil } -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: 
"default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: apiv1.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -112,30 +109,6 @@ func buildIngress() *extensions.Ingress { } } -func TestSecureUpstream(t *testing.T) { - ec := NewAnnotationExtractor(mockCfg{}) - ing := buildIngress() - - fooAnns := []struct { - annotations map[string]string - er bool - }{ - {map[string]string{annotationSecureUpstream: "true"}, true}, - {map[string]string{annotationSecureUpstream: "false"}, false}, - {map[string]string{annotationSecureUpstream + "_no": "true"}, false}, - {map[string]string{}, false}, - {nil, false}, - } - - for _, foo := range fooAnns { - ing.SetAnnotations(foo.annotations) - r := ec.Extract(ing).SecureUpstream - if r.Secure != foo.er { - t.Errorf("Returned %v but expected %v", r, foo.er) - } - } -} - func TestSecureVerifyCACert(t *testing.T) { ec := NewAnnotationExtractor(mockCfg{ MockSecrets: map[string]*apiv1.Secret{ @@ -152,11 +125,11 @@ func TestSecureVerifyCACert(t *testing.T) { annotations map[string]string exists bool }{ - {1, map[string]string{annotationSecureUpstream: "true", annotationSecureVerifyCACert: "not"}, false}, - {2, map[string]string{annotationSecureUpstream: "false", annotationSecureVerifyCACert: "secure-verify-ca"}, false}, - {3, map[string]string{annotationSecureUpstream: "true", annotationSecureVerifyCACert: "secure-verify-ca"}, true}, - {4, map[string]string{annotationSecureUpstream: "true", annotationSecureVerifyCACert + "_not": "secure-verify-ca"}, false}, - {5, map[string]string{annotationSecureUpstream: "true"}, false}, + {1, map[string]string{backendProtocol: "HTTPS", annotationSecureVerifyCACert: "not"}, false}, + {2, map[string]string{backendProtocol: "HTTP", annotationSecureVerifyCACert: "secure-verify-ca"}, false}, + {3, map[string]string{backendProtocol: "HTTPS", annotationSecureVerifyCACert: "secure-verify-ca"}, true}, + {4, map[string]string{backendProtocol: "HTTPS", annotationSecureVerifyCACert + "_not": "secure-verify-ca"}, false}, + {5, map[string]string{backendProtocol: "HTTPS"}, false}, {6, map[string]string{}, false}, {7, nil, false}, } @@ -171,36 +144,6 @@ func TestSecureVerifyCACert(t *testing.T) { } } -func TestHealthCheck(t *testing.T) { - ec := NewAnnotationExtractor(mockCfg{}) - ing := buildIngress() - - fooAnns := []struct { - annotations map[string]string - eumf int - euft int - }{ - {map[string]string{annotationUpsMaxFails: "3", annotationUpsFailTimeout: "10"}, 3, 10}, - {map[string]string{annotationUpsMaxFails: "3"}, 3, 0}, - {map[string]string{annotationUpsFailTimeout: "10"}, 0, 10}, - {map[string]string{}, 0, 0}, - {nil, 0, 0}, - } - - for _, foo := range fooAnns { - ing.SetAnnotations(foo.annotations) - r := ec.Extract(ing).HealthCheck - - if r.FailTimeout != foo.euft { - t.Errorf("Returned %d but expected %d for FailTimeout", r.FailTimeout, foo.euft) - } - - 
if r.MaxFails != foo.eumf { - t.Errorf("Returned %d but expected %d for MaxFails", r.MaxFails, foo.eumf) - } - } -} - func TestSSLPassthrough(t *testing.T) { ec := NewAnnotationExtractor(mockCfg{}) ing := buildIngress() @@ -242,7 +185,7 @@ func TestUpstreamHashBy(t *testing.T) { for _, foo := range fooAnns { ing.SetAnnotations(foo.annotations) - r := ec.Extract(ing).UpstreamHashBy + r := ec.Extract(ing).UpstreamHashBy.UpstreamHashBy if r != foo.er { t.Errorf("Returned %v but expected %v", r, foo.er) } @@ -256,24 +199,19 @@ func TestAffinitySession(t *testing.T) { fooAnns := []struct { annotations map[string]string affinitytype string - hash string name string }{ - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "md5", annotationAffinityCookieName: "route"}, "cookie", "md5", "route"}, - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "xpto", annotationAffinityCookieName: "route1"}, "cookie", "md5", "route1"}, - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "", annotationAffinityCookieName: ""}, "cookie", "md5", "INGRESSCOOKIE"}, - {map[string]string{}, "", "", ""}, - {nil, "", "", ""}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: "route"}, "cookie", "route"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: "route1"}, "cookie", "route1"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: ""}, "cookie", "INGRESSCOOKIE"}, + {map[string]string{}, "", ""}, + {nil, "", ""}, } for _, foo := range fooAnns { ing.SetAnnotations(foo.annotations) r := ec.Extract(ing).SessionAffinity - t.Logf("Testing pass %v %v %v", foo.affinitytype, foo.hash, foo.name) - - if r.Cookie.Hash != foo.hash { - t.Errorf("Returned %v but expected %v for Hash", r.Cookie.Hash, foo.hash) - } + t.Logf("Testing pass %v %v", foo.affinitytype, foo.name) if r.Cookie.Name != foo.name { t.Errorf("Returned %v but expected %v for Name", r.Cookie.Name, foo.name) @@ -327,6 +265,41 @@ func TestCors(t *testing.T) { } } +func TestCustomHTTPErrors(t *testing.T) { + ec := NewAnnotationExtractor(mockCfg{}) + ing := buildIngress() + + fooAnns := []struct { + annotations map[string]string + er []int + }{ + {map[string]string{annotationCustomHTTPErrors: "404,415"}, []int{404, 415}}, + {map[string]string{annotationCustomHTTPErrors: "404"}, []int{404}}, + {map[string]string{annotationCustomHTTPErrors: ""}, []int{}}, + {map[string]string{annotationCustomHTTPErrors + "_no": "404"}, []int{}}, + {map[string]string{}, []int{}}, + {nil, []int{}}, + } + + for _, foo := range fooAnns { + ing.SetAnnotations(foo.annotations) + r := ec.Extract(ing).CustomHTTPErrors + + // Check that expected codes were created + for i := range foo.er { + if r[i] != foo.er[i] { + t.Errorf("Returned %v but expected %v", r, foo.er) + } + } + + // Check that no unexpected codes were also created + for i := range r { + if r[i] != foo.er[i] { + t.Errorf("Returned %v but expected %v", r, foo.er) + } + } + } +} /* func TestMergeLocationAnnotations(t *testing.T) { diff --git a/internal/ingress/annotations/auth/main.go b/internal/ingress/annotations/auth/main.go index 1dd885f047..32a9c901fa 100644 --- a/internal/ingress/annotations/auth/main.go +++ b/internal/ingress/annotations/auth/main.go @@ -19,13 +19,12 @@ package auth import ( "fmt" "io/ioutil" - "os" - "path" "regexp" "github.com/pkg/errors" api "k8s.io/api/core/v1" - extensions 
"k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" + "k8s.io/client-go/tools/cache" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" @@ -47,6 +46,7 @@ type Config struct { File string `json:"file"` Secured bool `json:"secured"` FileSHA string `json:"fileSha"` + Secret string `json:"secret"` } // Equal tests for equality between two Config types @@ -72,7 +72,9 @@ func (bd1 *Config) Equal(bd2 *Config) bool { if bd1.FileSHA != bd2.FileSHA { return false } - + if bd1.Secret != bd2.Secret { + return false + } return true } @@ -83,17 +85,6 @@ type auth struct { // NewParser creates a new authentication annotation parser func NewParser(authDirectory string, r resolver.Resolver) parser.IngressAnnotation { - os.MkdirAll(authDirectory, 0755) - - currPath := authDirectory - for currPath != "/" { - currPath = path.Dir(currPath) - err := os.Chmod(currPath, 0755) - if err != nil { - break - } - } - return auth{r, authDirectory} } @@ -101,7 +92,7 @@ func NewParser(authDirectory string, r resolver.Resolver) parser.IngressAnnotati // rule used to add authentication in the paths defined in the rule // and generated an htpasswd compatible file to be used as source // during the authentication process -func (a auth) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a auth) Parse(ing *networking.Ingress) (interface{}, error) { at, err := parser.GetStringAnnotation("auth-type", ing) if err != nil { return nil, err @@ -118,7 +109,18 @@ func (a auth) Parse(ing *extensions.Ingress) (interface{}, error) { } } - name := fmt.Sprintf("%v/%v", ing.Namespace, s) + sns, sname, err := cache.SplitMetaNamespaceKey(s) + if err != nil { + return nil, ing_errors.LocationDenied{ + Reason: errors.Wrap(err, "error reading secret name from annotation"), + } + } + + if sns == "" { + sns = ing.Namespace + } + + name := fmt.Sprintf("%v/%v", sns, sname) secret, err := a.r.GetSecret(name) if err != nil { return nil, ing_errors.LocationDenied{ @@ -140,6 +142,7 @@ func (a auth) Parse(ing *extensions.Ingress) (interface{}, error) { File: passFile, Secured: true, FileSHA: file.SHA1(passFile), + Secret: name, }, nil } @@ -153,8 +156,7 @@ func dumpSecret(filename string, secret *api.Secret) error { } } - // TODO: check permissions required - err := ioutil.WriteFile(filename, val, 0777) + err := ioutil.WriteFile(filename, val, file.ReadWriteByUser) if err != nil { return ing_errors.LocationDenied{ Reason: errors.Wrap(err, "unexpected error creating password file"), diff --git a/internal/ingress/annotations/auth/main_test.go b/internal/ingress/annotations/auth/main_test.go index 3546bd0252..b8e4d231c3 100644 --- a/internal/ingress/annotations/auth/main_test.go +++ b/internal/ingress/annotations/auth/main_test.go @@ -26,35 +26,36 @@ import ( "github.com/pkg/errors" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + ing_errors "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: 
meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -96,6 +97,42 @@ func TestIngressWithoutAuth(t *testing.T) { } } +func TestIngressAuthBadAuthType(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("auth-type")] = "invalid" + ing.SetAnnotations(data) + + _, dir, _ := dummySecretContent(t) + defer os.RemoveAll(dir) + + expected := ing_errors.NewLocationDenied("invalid authentication type") + _, err := NewParser(dir, &mockSecret{}).Parse(ing) + if err.Error() != expected.Error() { + t.Errorf("expected '%v' but got '%v'", expected, err) + } +} + +func TestInvalidIngressAuthNoSecret(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("auth-type")] = "basic" + ing.SetAnnotations(data) + + _, dir, _ := dummySecretContent(t) + defer os.RemoveAll(dir) + + expected := ing_errors.LocationDenied{ + Reason: errors.New("error reading secret name from annotation: ingress rule without annotations"), + } + _, err := NewParser(dir, &mockSecret{}).Parse(ing) + if err.Error() != expected.Reason.Error() { + t.Errorf("expected '%v' but got '%v'", expected, err) + } +} + func TestIngressAuth(t *testing.T) { ing := buildIngress() diff --git a/internal/ingress/annotations/authreq/main.go b/internal/ingress/annotations/authreq/main.go index 43f936138e..18258b9f05 100644 --- a/internal/ingress/annotations/authreq/main.go +++ b/internal/ingress/annotations/authreq/main.go @@ -17,15 +17,19 @@ limitations under the License. 
package authreq import ( + "fmt" "net/url" "regexp" "strings" - extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/klog" + + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" ing_errors "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" + "k8s.io/ingress-nginx/internal/sets" ) // Config returns external authentication configuration for an Ingress rule @@ -35,7 +39,9 @@ type Config struct { Host string `json:"host"` SigninURL string `json:"signinUrl"` Method string `json:"method"` - ResponseHeaders []string `json:"responseHeaders,omitEmpty"` + ResponseHeaders []string `json:"responseHeaders,omitempty"` + RequestRedirect string `json:"requestRedirect"` + AuthSnippet string `json:"authSnippet"` } // Equal tests for equality between two Config types @@ -58,21 +64,17 @@ func (e1 *Config) Equal(e2 *Config) bool { if e1.Method != e2.Method { return false } - if e1.Method != e2.Method { + + match := sets.StringElementsMatch(e1.ResponseHeaders, e2.ResponseHeaders) + if !match { return false } - for _, ep1 := range e1.ResponseHeaders { - found := false - for _, ep2 := range e2.ResponseHeaders { - if ep1 == ep2 { - found = true - break - } - } - if !found { - return false - } + if e1.RequestRedirect != e2.RequestRedirect { + return false + } + if e1.AuthSnippet != e2.AuthSnippet { + return false } return true @@ -83,7 +85,8 @@ var ( headerRegexp = regexp.MustCompile(`^[a-zA-Z\d\-_]+$`) ) -func validMethod(method string) bool { +// ValidMethod checks whether the provided string is a valid HTTP method +func ValidMethod(method string) bool { if len(method) == 0 { return false } @@ -96,7 +99,8 @@ func validMethod(method string) bool { return false } -func validHeader(header string) bool { +// ValidHeader checks whether the provided string satisfies the header name regex +func ValidHeader(header string) bool { return headerRegexp.Match([]byte(header)) } @@ -111,59 +115,77 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // ParseAnnotations parses the annotations contained in the ingress // rule used to use a Config URL as source for authentication -func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { - str, err := parser.GetStringAnnotation("auth-url", ing) +func (a authReq) Parse(ing *networking.Ingress) (interface{}, error) { + // Required Parameters + urlString, err := parser.GetStringAnnotation("auth-url", ing) if err != nil { return nil, err } - if str == "" { - return nil, ing_errors.NewLocationDenied("an empty string is not a valid URL") + authURL, message := ParseStringToURL(urlString) + if authURL == nil { + return nil, ing_errors.NewLocationDenied(message) } - signin, _ := parser.GetStringAnnotation("auth-signin", ing) - - ur, err := url.Parse(str) - if err != nil { - return nil, err - } - if ur.Scheme == "" { - return nil, ing_errors.NewLocationDenied("url scheme is empty") - } - if ur.Host == "" { - return nil, ing_errors.NewLocationDenied("url host is empty") + authMethod, _ := parser.GetStringAnnotation("auth-method", ing) + if len(authMethod) != 0 && !ValidMethod(authMethod) { + return nil, ing_errors.NewLocationDenied("invalid HTTP method") } - if strings.Contains(ur.Host, "..") { - return nil, ing_errors.NewLocationDenied("invalid url host") + // Optional Parameters + signIn, err := parser.GetStringAnnotation("auth-signin", ing) + if err != nil { + klog.V(3).Infof("auth-signin annotation is undefined and will not be set") } - m, _ := 
parser.GetStringAnnotation("auth-method", ing) - if len(m) != 0 && !validMethod(m) { - return nil, ing_errors.NewLocationDenied("invalid HTTP method") + authSnippet, err := parser.GetStringAnnotation("auth-snippet", ing) + if err != nil { + klog.V(3).Infof("auth-snippet annotation is undefined and will not be set") } - h := []string{} + responseHeaders := []string{} hstr, _ := parser.GetStringAnnotation("auth-response-headers", ing) if len(hstr) != 0 { - harr := strings.Split(hstr, ",") for _, header := range harr { header = strings.TrimSpace(header) if len(header) > 0 { - if !validHeader(header) { + if !ValidHeader(header) { return nil, ing_errors.NewLocationDenied("invalid headers list") } - h = append(h, header) + responseHeaders = append(responseHeaders, header) } } } + requestRedirect, _ := parser.GetStringAnnotation("auth-request-redirect", ing) + return &Config{ - URL: str, - Host: ur.Hostname(), - SigninURL: signin, - Method: m, - ResponseHeaders: h, + URL: urlString, + Host: authURL.Hostname(), + SigninURL: signIn, + Method: authMethod, + ResponseHeaders: responseHeaders, + RequestRedirect: requestRedirect, + AuthSnippet: authSnippet, }, nil } + +// ParseStringToURL parses the provided string into a URL and returns an error +// message in case of failure +func ParseStringToURL(input string) (*url.URL, string) { + + parsedURL, err := url.Parse(input) + if err != nil { + return nil, fmt.Sprintf("%v is not a valid URL: %v", input, err) + } + if parsedURL.Scheme == "" { + return nil, "url scheme is empty." + } else if parsedURL.Host == "" { + return nil, "url host is empty." + } else if strings.Contains(parsedURL.Host, "..") { + return nil, "invalid url host." + } + return parsedURL, "" + +} diff --git a/internal/ingress/annotations/authreq/main_test.go b/internal/ingress/annotations/authreq/main_test.go index fc9b1e9aee..a1a5d0f538 100644 --- a/internal/ingress/annotations/authreq/main_test.go +++ b/internal/ingress/annotations/authreq/main_test.go @@ -18,11 +18,12 @@ package authreq import ( "fmt" + "net/url" "reflect" "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -30,28 +31,28 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -72,33 +73,43 @@ func TestAnnotations(t *testing.T) { ing.SetAnnotations(data) tests := []struct { - title string - url string - signinURL string - 
method string - expErr bool + title string + url string + signinURL string + method string + requestRedirect string + authSnippet string + expErr bool }{ - {"empty", "", "", "", true}, - {"no scheme", "bar", "bar", "", true}, - {"invalid host", "http://", "http://", "", true}, - {"invalid host (multiple dots)", "http://foo..bar.com", "http://foo..bar.com", "", true}, - {"valid URL", "http://bar.foo.com/external-auth", "http://bar.foo.com/external-auth", "", false}, - {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "POST", false}, - {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "GET", false}, + {"empty", "", "", "", "", "", true}, + {"no scheme", "bar", "bar", "", "", "", true}, + {"invalid host", "http://", "http://", "", "", "", true}, + {"invalid host (multiple dots)", "http://foo..bar.com", "http://foo..bar.com", "", "", "", true}, + {"valid URL", "http://bar.foo.com/external-auth", "http://bar.foo.com/external-auth", "", "", "", false}, + {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "POST", "", "", false}, + {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "GET", "", "", false}, + {"valid URL - request redirect", "http://foo.com/external-auth", "http://foo.com/external-auth", "GET", "http://foo.com/redirect-me", "", false}, + {"auth snippet", "http://foo.com/external-auth", "http://foo.com/external-auth", "", "", "proxy_set_header My-Custom-Header 42;", false}, } for _, test := range tests { data[parser.GetAnnotationWithPrefix("auth-url")] = test.url data[parser.GetAnnotationWithPrefix("auth-signin")] = test.signinURL data[parser.GetAnnotationWithPrefix("auth-method")] = fmt.Sprintf("%v", test.method) + data[parser.GetAnnotationWithPrefix("auth-request-redirect")] = test.requestRedirect + data[parser.GetAnnotationWithPrefix("auth-snippet")] = test.authSnippet i, err := NewParser(&resolver.Mock{}).Parse(ing) if test.expErr { if err == nil { - t.Errorf("%v: expected error but retuned nil", test.title) + t.Errorf("%v: expected error but returned nil", test.title) } continue } + if err != nil { + t.Errorf("%v: unexpected error: %v", test.title, err) + } + u, ok := i.(*Config) if !ok { t.Errorf("%v: expected an External type", test.title) @@ -112,6 +123,12 @@ func TestAnnotations(t *testing.T) { if u.Method != test.method { t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.method, u.Method) } + if u.RequestRedirect != test.requestRedirect { + t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.requestRedirect, u.RequestRedirect) + } + if u.AuthSnippet != test.authSnippet { + t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.authSnippet, u.AuthSnippet) + } } } @@ -162,3 +179,38 @@ func TestHeaderAnnotations(t *testing.T) { } } } + +func TestParseStringToURL(t *testing.T) { + validURL := "http://bar.foo.com/external-auth" + validParsedURL, _ := url.Parse(validURL) + + tests := []struct { + title string + url string + message string + parsed *url.URL + expErr bool + }{ + {"empty", "", "url scheme is empty.", nil, true}, + {"no scheme", "bar", "url scheme is empty.", nil, true}, + {"invalid host", "http://", "url host is empty.", nil, true}, + {"invalid host (multiple dots)", "http://foo..bar.com", "invalid url host.", nil, true}, + {"valid URL", validURL, "", validParsedURL, false}, + } + + for _, test := range tests { + + i, err := ParseStringToURL(test.url) 
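+ // note: ParseStringToURL reports failure through its second return value, a
+ // message string, rather than an error; on success the message is empty and
+ // the returned *url.URL is non-nil.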
+ if test.expErr { + if err != test.message { + t.Errorf("%v: expected error \"%v\" but \"%v\" was returned", test.title, test.message, err) + } + continue + } + + if i.String() != test.parsed.String() { + t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.parsed, i) + } + } + +} diff --git a/internal/ingress/annotations/authreqglobal/main.go b/internal/ingress/annotations/authreqglobal/main.go new file mode 100644 index 0000000000..170f6957db --- /dev/null +++ b/internal/ingress/annotations/authreqglobal/main.go @@ -0,0 +1,45 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authreqglobal + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type authReqGlobal struct { + r resolver.Resolver +} + +// NewParser creates a new authentication request annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return authReqGlobal{r} +} + +// ParseAnnotations parses the annotations contained in the ingress +// rule used to enable or disable global external authentication +func (a authReqGlobal) Parse(ing *networking.Ingress) (interface{}, error) { + + enableGlobalAuth, err := parser.GetBoolAnnotation("enable-global-auth", ing) + if err != nil { + enableGlobalAuth = true + } + + return enableGlobalAuth, nil +} diff --git a/internal/ingress/annotations/authreqglobal/main_test.go b/internal/ingress/annotations/authreqglobal/main_test.go new file mode 100644 index 0000000000..a4096f7da9 --- /dev/null +++ b/internal/ingress/annotations/authreqglobal/main_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package authreqglobal + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestAnnotation(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("auth-url")] = "http://foo.com/external-auth" + data[parser.GetAnnotationWithPrefix("enable-global-auth")] = "false" + ing.SetAnnotations(data) + + i, _ := NewParser(&resolver.Mock{}).Parse(ing) + u, ok := i.(bool) + if !ok { + t.Errorf("expected a bool type") + } + if u { + t.Errorf("Expected false but returned true") + } +} diff --git a/internal/ingress/annotations/authtls/main.go b/internal/ingress/annotations/authtls/main.go index 7c608bb536..16e218d333 100644 --- a/internal/ingress/annotations/authtls/main.go +++ b/internal/ingress/annotations/authtls/main.go @@ -18,7 +18,7 @@ package authtls import ( "github.com/pkg/errors" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "regexp" @@ -37,7 +37,7 @@ var ( authVerifyClientRegex = regexp.MustCompile(`on|off|optional|optional_no_ca`) ) -// Config contains the AuthSSLCert used for muthual autentication +// Config contains the AuthSSLCert used for mutual authentication // and the configured ValidationDepth type Config struct { resolver.AuthSSLCert @@ -45,6 +45,7 @@ type Config struct { ValidationDepth int `json:"validationDepth"` ErrorPage string `json:"errorPage"` PassCertToUpstream bool `json:"passCertToUpstream"` + AuthTLSError string } // Equal tests for equality between two Config types @@ -85,54 +86,46 @@ type authTLS struct { // Parse parses the annotations contained in the ingress // rule used to use a Certificate as authentication method -func (a authTLS) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a authTLS) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} tlsauthsecret, err := parser.GetStringAnnotation("auth-tls-secret", ing) if err != nil { return &Config{}, err } - if tlsauthsecret == "" { - return &Config{}, ing_errors.NewLocationDenied("an empty string is not a valid secret name") - } - _, _, err = k8s.ParseNameNS(tlsauthsecret) if err != nil { return &Config{}, ing_errors.NewLocationDenied(err.Error()) } - tlsVerifyClient, err := parser.GetStringAnnotation("auth-tls-verify-client", ing) - if err != nil || !authVerifyClientRegex.MatchString(tlsVerifyClient) { - tlsVerifyClient = defaultAuthVerifyClient + authCert, err := a.r.GetAuthCertificate(tlsauthsecret) + if err != nil { + e := errors.Wrap(err, "error obtaining certificate") + return &Config{}, 
ing_errors.LocationDenied{Reason: e} } + config.AuthSSLCert = *authCert - tlsdepth, err := parser.GetIntAnnotation("auth-tls-verify-depth", ing) - if err != nil || tlsdepth == 0 { - tlsdepth = defaultAuthTLSDepth + config.VerifyClient, err = parser.GetStringAnnotation("auth-tls-verify-client", ing) + if err != nil || !authVerifyClientRegex.MatchString(config.VerifyClient) { + config.VerifyClient = defaultAuthVerifyClient } - authCert, err := a.r.GetAuthCertificate(tlsauthsecret) - if err != nil { - return &Config{}, ing_errors.LocationDenied{ - Reason: errors.Wrap(err, "error obtaining certificate"), - } + config.ValidationDepth, err = parser.GetIntAnnotation("auth-tls-verify-depth", ing) + if err != nil || config.ValidationDepth == 0 { + config.ValidationDepth = defaultAuthTLSDepth } - errorpage, err := parser.GetStringAnnotation("auth-tls-error-page", ing) - if err != nil || errorpage == "" { - errorpage = "" + config.ErrorPage, err = parser.GetStringAnnotation("auth-tls-error-page", ing) + if err != nil { + config.ErrorPage = "" } - passCert, err := parser.GetBoolAnnotation("auth-tls-pass-certificate-to-upstream", ing) + config.PassCertToUpstream, err = parser.GetBoolAnnotation("auth-tls-pass-certificate-to-upstream", ing) if err != nil { - passCert = false + config.PassCertToUpstream = false } - return &Config{ - AuthSSLCert: *authCert, - VerifyClient: tlsVerifyClient, - ValidationDepth: tlsdepth, - ErrorPage: errorpage, - PassCertToUpstream: passCert, - }, nil + return config, nil } diff --git a/internal/ingress/annotations/authtls/main_test.go b/internal/ingress/annotations/authtls/main_test.go index a3613bb159..b713928405 100644 --- a/internal/ingress/annotations/authtls/main_test.go +++ b/internal/ingress/annotations/authtls/main_test.go @@ -20,33 +20,36 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -60,49 +63,66 @@ func buildIngress() *extensions.Ingress { } } +// mocks the resolver for authTLS +type mockSecret struct { + resolver.Mock +} + +// GetAuthCertificate from mockSecret mocks the GetAuthCertificate for authTLS +func (m mockSecret) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) { + if name != "default/demo-secret" { + return nil, 
errors.Errorf("there is no secret with name %v", name) + } + + return &resolver.AuthSSLCert{ + Secret: "default/demo-secret", + CAFileName: "/ssl/ca.crt", + PemSHA: "abc", + }, nil + +} + func TestAnnotations(t *testing.T) { ing := buildIngress() - data := map[string]string{} + + data[parser.GetAnnotationWithPrefix("auth-tls-secret")] = "default/demo-secret" + data[parser.GetAnnotationWithPrefix("auth-tls-verify-client")] = "off" + data[parser.GetAnnotationWithPrefix("auth-tls-verify-depth")] = "1" + data[parser.GetAnnotationWithPrefix("auth-tls-error-page")] = "ok.com/error" + data[parser.GetAnnotationWithPrefix("auth-tls-pass-certificate-to-upstream")] = "true" + ing.SetAnnotations(data) - /* - tests := []struct { - title string - url string - method string - sendBody bool - expErr bool - }{ - {"empty", "", "", false, true}, - {"no scheme", "bar", "", false, true}, - {"invalid host", "http://", "", false, true}, - {"invalid host (multiple dots)", "http://foo..bar.com", "", false, true}, - {"valid URL", "http://bar.foo.com/external-auth", "", false, false}, - {"valid URL - send body", "http://foo.com/external-auth", "POST", true, false}, - {"valid URL - send body", "http://foo.com/external-auth", "GET", true, false}, - } - - for _, test := range tests { - data[authTLSSecret] = "" - test.title - - u, err := ParseAnnotations(ing) - - if test.expErr { - if err == nil { - t.Errorf("%v: expected error but retuned nil", test.title) - } - continue - } - - if u.URL != test.url { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.url, u.URL) - } - if u.Method != test.method { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.method, u.Method) - } - if u.SendBody != test.sendBody { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.sendBody, u.SendBody) - } - }*/ + + fakeSecret := &mockSecret{} + i, err := NewParser(fakeSecret).Parse(ing) + if err != nil { + t.Errorf("Unexpected error with ingress: %v", err) + } + + u, ok := i.(*Config) + if !ok { + t.Errorf("expected *Config but got %v", u) + } + + secret, err := fakeSecret.GetAuthCertificate("default/demo-secret") + if err != nil { + t.Errorf("unexpected error getting secret %v", err) + } + + if u.AuthSSLCert.Secret != secret.Secret { + t.Errorf("expected %v but got %v", secret.Secret, u.AuthSSLCert.Secret) + } + if u.VerifyClient != "off" { + t.Errorf("expected %v but got %v", "off", u.VerifyClient) + } + if u.ValidationDepth != 1 { + t.Errorf("expected %v but got %v", 1, u.ValidationDepth) + } + if u.ErrorPage != "ok.com/error" { + t.Errorf("expected %v but got %v", "ok.com/error", u.ErrorPage) + } + if u.PassCertToUpstream != true { + t.Errorf("expected %v but got %v", true, u.PassCertToUpstream) + } } diff --git a/internal/ingress/annotations/backendprotocol/main.go b/internal/ingress/annotations/backendprotocol/main.go new file mode 100644 index 0000000000..075a036e23 --- /dev/null +++ b/internal/ingress/annotations/backendprotocol/main.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backendprotocol + +import ( + "regexp" + "strings" + + networking "k8s.io/api/networking/v1beta1" + "k8s.io/klog" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +// HTTP protocol +const HTTP = "HTTP" + +var ( + validProtocols = regexp.MustCompile(`^(HTTP|HTTPS|AJP|GRPC|GRPCS)$`) +) + +type backendProtocol struct { + r resolver.Resolver +} + +// NewParser creates a new backend protocol annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return backendProtocol{r} +} + +// ParseAnnotations parses the annotations contained in the ingress +// rule used to indicate the backend protocol. +func (a backendProtocol) Parse(ing *networking.Ingress) (interface{}, error) { + if ing.GetAnnotations() == nil { + return HTTP, nil + } + + proto, err := parser.GetStringAnnotation("backend-protocol", ing) + if err != nil { + return HTTP, nil + } + + proto = strings.TrimSpace(strings.ToUpper(proto)) + if !validProtocols.MatchString(proto) { + klog.Warningf("Protocol %v is not a valid value for the backend-protocol annotation. Using HTTP as protocol", proto) + return HTTP, nil + } + + return proto, nil +} diff --git a/internal/ingress/annotations/backendprotocol/main_test.go b/internal/ingress/annotations/backendprotocol/main_test.go new file mode 100644 index 0000000000..4a1c1bf31f --- /dev/null +++ b/internal/ingress/annotations/backendprotocol/main_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
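The normalization in `backendprotocol.Parse` can be tried in isolation; this is a minimal, runnable sketch using the same regex and fall-back as the parser above:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same validation rule as backendprotocol.Parse above.
var validProtocols = regexp.MustCompile(`^(HTTP|HTTPS|AJP|GRPC|GRPCS)$`)

// normalize trims, upper-cases and validates the annotation value,
// falling back to HTTP exactly as the parser does.
func normalize(proto string) string {
	proto = strings.TrimSpace(strings.ToUpper(proto))
	if !validProtocols.MatchString(proto) {
		return "HTTP"
	}
	return proto
}

func main() {
	fmt.Println(normalize(" grpc ")) // GRPC
	fmt.Println(normalize("SPDY"))   // HTTP (invalid values are rejected)
}
```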
+*/
+
+package backendprotocol
+
+import (
+	"testing"
+
+	api "k8s.io/api/core/v1"
+	networking "k8s.io/api/networking/v1beta1"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+	"k8s.io/ingress-nginx/internal/ingress/resolver"
+
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func buildIngress() *networking.Ingress {
+	return &networking.Ingress{
+		ObjectMeta: meta_v1.ObjectMeta{
+			Name:      "foo",
+			Namespace: api.NamespaceDefault,
+		},
+		Spec: networking.IngressSpec{
+			Backend: &networking.IngressBackend{
+				ServiceName: "default-backend",
+				ServicePort: intstr.FromInt(80),
+			},
+		},
+	}
+}
+
+func TestParseInvalidAnnotations(t *testing.T) {
+	ing := buildIngress()
+
+	// Test no annotations set
+	i, err := NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("unexpected error parsing ingress with backend-protocol")
+	}
+	val, ok := i.(string)
+	if !ok {
+		t.Errorf("expected a string type")
+	}
+	if val != "HTTP" {
+		t.Errorf("expected HTTP but %v returned", val)
+	}
+
+	data := map[string]string{}
+	ing.SetAnnotations(data)
+
+	// Test with empty annotations
+	i, err = NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("unexpected error parsing ingress with backend-protocol")
+	}
+	val, ok = i.(string)
+	if !ok {
+		t.Errorf("expected a string type")
+	}
+	if val != "HTTP" {
+		t.Errorf("expected HTTP but %v returned", val)
+	}
+
+	// Test invalid annotation set
+	data[parser.GetAnnotationWithPrefix("backend-protocol")] = "INVALID"
+	ing.SetAnnotations(data)
+
+	i, err = NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("unexpected error parsing ingress with backend-protocol")
+	}
+	val, ok = i.(string)
+	if !ok {
+		t.Errorf("expected a string type")
+	}
+	if val != "HTTP" {
+		t.Errorf("expected HTTP but %v returned", val)
+	}
+}
+
+func TestParseAnnotations(t *testing.T) {
+	ing := buildIngress()
+
+	data := map[string]string{}
+	data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS"
+	ing.SetAnnotations(data)
+
+	i, err := NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("unexpected error parsing ingress with backend-protocol")
+	}
+	val, ok := i.(string)
+	if !ok {
+		t.Errorf("expected a string type")
+	}
+	if val != "HTTPS" {
+		t.Errorf("expected HTTPS but %v returned", val)
+	}
+}
diff --git a/internal/ingress/annotations/canary/main.go b/internal/ingress/annotations/canary/main.go
new file mode 100644
index 0000000000..55d0e7fcd1
--- /dev/null
+++ b/internal/ingress/annotations/canary/main.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package canary + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type canary struct { + r resolver.Resolver +} + +// Config returns the configuration rules for setting up the Canary +type Config struct { + Enabled bool + Weight int + Header string + HeaderValue string + Cookie string +} + +// NewParser parses the ingress for canary related annotations +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return canary{r} +} + +// Parse parses the annotations contained in the ingress +// rule used to indicate if the canary should be enabled and with what config +func (c canary) Parse(ing *networking.Ingress) (interface{}, error) { + config := &Config{} + var err error + + config.Enabled, err = parser.GetBoolAnnotation("canary", ing) + if err != nil { + config.Enabled = false + } + + config.Weight, err = parser.GetIntAnnotation("canary-weight", ing) + if err != nil { + config.Weight = 0 + } + + config.Header, err = parser.GetStringAnnotation("canary-by-header", ing) + if err != nil { + config.Header = "" + } + + config.HeaderValue, err = parser.GetStringAnnotation("canary-by-header-value", ing) + if err != nil { + config.HeaderValue = "" + } + + config.Cookie, err = parser.GetStringAnnotation("canary-by-cookie", ing) + if err != nil { + config.Cookie = "" + } + + if !config.Enabled && (config.Weight > 0 || len(config.Header) > 0 || len(config.HeaderValue) > 0 || len(config.Cookie) > 0) { + return nil, errors.NewInvalidAnnotationConfiguration("canary", "configured but not enabled") + } + + return config, nil +} diff --git a/internal/ingress/annotations/canary/main_test.go b/internal/ingress/annotations/canary/main_test.go new file mode 100644 index 0000000000..f755fe8656 --- /dev/null +++ b/internal/ingress/annotations/canary/main_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
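The consistency rule enforced at the end of `canary.Parse` is worth calling out: any canary routing rule without `canary: true` is rejected, not silently ignored. A standalone sketch of just that check:

```go
package main

import (
	"errors"
	"fmt"
)

// canaryConfig mirrors the fields canary.Parse reads from the annotations.
type canaryConfig struct {
	enabled     bool
	weight      int
	header      string
	headerValue string
	cookie      string
}

// validate mirrors the guard at the end of canary.Parse: routing rules
// (weight, header, header value or cookie) require the canary to be enabled.
func validate(c canaryConfig) error {
	if !c.enabled && (c.weight > 0 || len(c.header) > 0 || len(c.headerValue) > 0 || len(c.cookie) > 0) {
		return errors.New("canary: configured but not enabled")
	}
	return nil
}

func main() {
	fmt.Println(validate(canaryConfig{enabled: false, weight: 20})) // error
	fmt.Println(validate(canaryConfig{enabled: true, weight: 20}))  // <nil>
}
```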
+*/ + +package canary + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + + "strconv" + + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: metaV1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestCanaryInvalid(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("Error Parsing Canary Annotations") + } + + val, ok := i.(*Config) + if !ok { + t.Errorf("Expected %v and got %v", "*Config", val) + } + if val.Enabled != false { + t.Errorf("Expected %v but got %v", false, val.Enabled) + } + if val.Weight != 0 { + t.Errorf("Expected %v but got %v", 0, val.Weight) + } + +} + +func TestAnnotations(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + ing.SetAnnotations(data) + + tests := []struct { + title string + canaryEnabled bool + canaryWeight int + canaryHeader string + canaryCookie string + expErr bool + }{ + {"canary disabled and no weight", false, 0, "", "", false}, + {"canary disabled and weight", false, 20, "", "", true}, + {"canary disabled and header", false, 0, "X-Canary", "", true}, + {"canary disabled and cookie", false, 0, "", "canary_enabled", true}, + {"canary enabled and weight", true, 20, "", "", false}, + {"canary enabled and no weight", true, 0, "", "", false}, + {"canary enabled by header", true, 20, "X-Canary", "", false}, + {"canary enabled by cookie", true, 20, "", "canary_enabled", false}, + } + + for _, test := range tests { + data[parser.GetAnnotationWithPrefix("canary")] = strconv.FormatBool(test.canaryEnabled) + data[parser.GetAnnotationWithPrefix("canary-weight")] = strconv.Itoa(test.canaryWeight) + data[parser.GetAnnotationWithPrefix("canary-by-header")] = test.canaryHeader + data[parser.GetAnnotationWithPrefix("canary-by-cookie")] = test.canaryCookie + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if test.expErr { + if err == nil { + t.Errorf("%v: expected error but returned nil", test.title) + } + + continue + } else { + if err != nil { + t.Errorf("%v: expected nil but returned error %v", test.title, err) + } + } + + canaryConfig, ok := i.(*Config) + if !ok { + t.Errorf("%v: expected an External type", test.title) + } + if canaryConfig.Enabled != test.canaryEnabled { + t.Errorf("%v: expected \"%v\", but \"%v\" was returned", test.title, test.canaryEnabled, canaryConfig.Enabled) + } + if canaryConfig.Weight != test.canaryWeight { + t.Errorf("%v: expected \"%v\", but \"%v\" was returned", test.title, test.canaryWeight, canaryConfig.Weight) + } + if canaryConfig.Header != test.canaryHeader { + t.Errorf("%v: expected \"%v\", but \"%v\" was returned", test.title, 
test.canaryHeader, canaryConfig.Header) + } + if canaryConfig.Cookie != test.canaryCookie { + t.Errorf("%v: expected \"%v\", but \"%v\" was returned", test.title, test.canaryCookie, canaryConfig.Cookie) + } + } +} diff --git a/internal/ingress/annotations/class/main.go b/internal/ingress/annotations/class/main.go index 6b5a6d06c1..fab3a96558 100644 --- a/internal/ingress/annotations/class/main.go +++ b/internal/ingress/annotations/class/main.go @@ -17,8 +17,9 @@ limitations under the License. package class import ( - "github.com/golang/glog" - extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/klog" + + networking "k8s.io/api/networking/v1beta1" ) const ( @@ -41,10 +42,10 @@ var ( // IsValid returns true if the given Ingress either doesn't specify // the ingress.class annotation, or it's set to the configured in the // ingress controller. -func IsValid(ing *extensions.Ingress) bool { +func IsValid(ing *networking.Ingress) bool { ingress, ok := ing.GetAnnotations()[IngressKey] if !ok { - glog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name) + klog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name) } // we have 2 valid combinations diff --git a/internal/ingress/annotations/class/main_test.go b/internal/ingress/annotations/class/main_test.go index 8b85e31920..f38eeb790d 100644 --- a/internal/ingress/annotations/class/main_test.go +++ b/internal/ingress/annotations/class/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -47,7 +47,7 @@ func TestIsValidClass(t *testing.T) { {"custom", "nginx", "nginx", false}, } - ing := &extensions.Ingress{ + ing := &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, diff --git a/internal/ingress/annotations/clientbodybuffersize/main.go b/internal/ingress/annotations/clientbodybuffersize/main.go index 41cce30ba2..924ceecd17 100644 --- a/internal/ingress/annotations/clientbodybuffersize/main.go +++ b/internal/ingress/annotations/clientbodybuffersize/main.go @@ -17,7 +17,7 @@ limitations under the License. 
 package clientbodybuffersize
 
 import (
-	extensions "k8s.io/api/extensions/v1beta1"
+	networking "k8s.io/api/networking/v1beta1"
 
 	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
 	"k8s.io/ingress-nginx/internal/ingress/resolver"
@@ -34,6 +34,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
 
 // Parse parses the annotations contained in the ingress rule
 // used to add an client-body-buffer-size to the provided locations
-func (cbbs clientBodyBufferSize) Parse(ing *extensions.Ingress) (interface{}, error) {
+func (cbbs clientBodyBufferSize) Parse(ing *networking.Ingress) (interface{}, error) {
 	return parser.GetStringAnnotation("client-body-buffer-size", ing)
 }
diff --git a/internal/ingress/annotations/clientbodybuffersize/main_test.go b/internal/ingress/annotations/clientbodybuffersize/main_test.go
index cc261fb63d..56f64083c4 100644
--- a/internal/ingress/annotations/clientbodybuffersize/main_test.go
+++ b/internal/ingress/annotations/clientbodybuffersize/main_test.go
@@ -20,7 +20,7 @@ import (
 	"testing"
 
 	api "k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
+	networking "k8s.io/api/networking/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
 	"k8s.io/ingress-nginx/internal/ingress/resolver"
@@ -44,12 +44,12 @@ func TestParse(t *testing.T) {
 		{nil, ""},
 	}
 
-	ing := &extensions.Ingress{
+	ing := &networking.Ingress{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      "foo",
 			Namespace: api.NamespaceDefault,
 		},
-		Spec: extensions.IngressSpec{},
+		Spec: networking.IngressSpec{},
 	}
 
 	for _, testCase := range testCases {
diff --git a/internal/ingress/annotations/connection/main.go b/internal/ingress/annotations/connection/main.go
new file mode 100644
index 0000000000..7d45fdc364
--- /dev/null
+++ b/internal/ingress/annotations/connection/main.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connection
+
+import (
+	networking "k8s.io/api/networking/v1beta1"
+
+	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+	"k8s.io/ingress-nginx/internal/ingress/resolver"
+)
+
+// Config contains the connection header configuration for an Ingress rule
+type Config struct {
+	Header  string `json:"header"`
+	Enabled bool   `json:"enabled"`
+}
+
+type connection struct {
+	r resolver.Resolver
+}
+
+// NewParser creates a new connection annotation parser
+func NewParser(r resolver.Resolver) parser.IngressAnnotation {
+	return connection{r}
+}
+
+// Parse parses the annotations contained in the ingress
+// rule used to indicate if the connection header should be overridden.
+func (a connection) Parse(ing *networking.Ingress) (interface{}, error) { + cp, err := parser.GetStringAnnotation("connection-proxy-header", ing) + if err != nil { + return &Config{ + Enabled: false, + }, err + } + return &Config{ + Enabled: true, + Header: cp, + }, nil +} + +// Equal tests for equality between two Connection types +func (r1 *Config) Equal(r2 *Config) bool { + if r1 == r2 { + return true + } + if r1 == nil || r2 == nil { + return false + } + if r1.Enabled != r2.Enabled { + return false + } + if r1.Header != r2.Header { + return false + } + + return true +} diff --git a/internal/ingress/annotations/connection/main_test.go b/internal/ingress/annotations/connection/main_test.go new file mode 100644 index 0000000000..d86aeb16a2 --- /dev/null +++ b/internal/ingress/annotations/connection/main_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package connection + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + annotation := parser.GetAnnotationWithPrefix("connection-proxy-header") + + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected *Config + }{ + {map[string]string{annotation: "keep-alive"}, &Config{Enabled: true, Header: "keep-alive"}}, + {map[string]string{}, &Config{Enabled: false}}, + {nil, &Config{Enabled: false}}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + i, _ := ap.Parse(ing) + p, _ := i.(*Config) + + if !p.Equal(testCase.expected) { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, p, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/cors/main.go b/internal/ingress/annotations/cors/main.go index a23a1c5be1..b2c0bf778e 100644 --- a/internal/ingress/annotations/cors/main.go +++ b/internal/ingress/annotations/cors/main.go @@ -19,7 +19,7 @@ package cors import ( "regexp" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -29,6 +29,7 @@ const ( // Default values defaultCorsMethods = "GET, PUT, POST, DELETE, PATCH, OPTIONS" defaultCorsHeaders = "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization" + defaultCorsMaxAge = 1728000 ) var ( @@ -55,6 +56,7 @@ type Config struct { CorsAllowMethods string `json:"corsAllowMethods"` CorsAllowHeaders string `json:"corsAllowHeaders"` 
CorsAllowCredentials bool `json:"corsAllowCredentials"` + CorsMaxAge int `json:"corsMaxAge"` } // NewParser creates a new CORS annotation parser @@ -70,6 +72,9 @@ func (c1 *Config) Equal(c2 *Config) bool { if c1 == nil || c2 == nil { return false } + if c1.CorsMaxAge != c2.CorsMaxAge { + return false + } if c1.CorsAllowCredentials != c2.CorsAllowCredentials { return false } @@ -91,38 +96,40 @@ func (c1 *Config) Equal(c2 *Config) bool { // Parse parses the annotations contained in the ingress // rule used to indicate if the location/s should allows CORS -func (c cors) Parse(ing *extensions.Ingress) (interface{}, error) { - corsenabled, err := parser.GetBoolAnnotation("enable-cors", ing) +func (c cors) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} + + config.CorsEnabled, err = parser.GetBoolAnnotation("enable-cors", ing) if err != nil { - corsenabled = false + config.CorsEnabled = false } - corsalloworigin, err := parser.GetStringAnnotation("cors-allow-origin", ing) - if err != nil || corsalloworigin == "" || !corsOriginRegex.MatchString(corsalloworigin) { - corsalloworigin = "*" + config.CorsAllowOrigin, err = parser.GetStringAnnotation("cors-allow-origin", ing) + if err != nil || !corsOriginRegex.MatchString(config.CorsAllowOrigin) { + config.CorsAllowOrigin = "*" } - corsallowheaders, err := parser.GetStringAnnotation("cors-allow-headers", ing) - if err != nil || corsallowheaders == "" || !corsHeadersRegex.MatchString(corsallowheaders) { - corsallowheaders = defaultCorsHeaders + config.CorsAllowHeaders, err = parser.GetStringAnnotation("cors-allow-headers", ing) + if err != nil || !corsHeadersRegex.MatchString(config.CorsAllowHeaders) { + config.CorsAllowHeaders = defaultCorsHeaders } - corsallowmethods, err := parser.GetStringAnnotation("cors-allow-methods", ing) - if err != nil || corsallowmethods == "" || !corsMethodsRegex.MatchString(corsallowmethods) { - corsallowmethods = defaultCorsMethods + config.CorsAllowMethods, err = parser.GetStringAnnotation("cors-allow-methods", ing) + if err != nil || !corsMethodsRegex.MatchString(config.CorsAllowMethods) { + config.CorsAllowMethods = defaultCorsMethods + } + + config.CorsAllowCredentials, err = parser.GetBoolAnnotation("cors-allow-credentials", ing) + if err != nil { + config.CorsAllowCredentials = true } - corsallowcredentials, err := parser.GetBoolAnnotation("cors-allow-credentials", ing) + config.CorsMaxAge, err = parser.GetIntAnnotation("cors-max-age", ing) if err != nil { - corsallowcredentials = true + config.CorsMaxAge = defaultCorsMaxAge } - return &Config{ - CorsEnabled: corsenabled, - CorsAllowOrigin: corsalloworigin, - CorsAllowHeaders: corsallowheaders, - CorsAllowMethods: corsallowmethods, - CorsAllowCredentials: corsallowcredentials, - }, nil + return config, nil } diff --git a/internal/ingress/annotations/cors/main_test.go b/internal/ingress/annotations/cors/main_test.go index c8a1952942..029216598c 100644 --- a/internal/ingress/annotations/cors/main_test.go +++ b/internal/ingress/annotations/cors/main_test.go @@ -20,35 +20,35 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() 
*networking.Ingress {
+	defaultBackend := networking.IngressBackend{
 		ServiceName: "default-backend",
 		ServicePort: intstr.FromInt(80),
 	}
 
-	return &extensions.Ingress{
+	return &networking.Ingress{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      "foo",
 			Namespace: api.NamespaceDefault,
 		},
-		Spec: extensions.IngressSpec{
-			Backend: &extensions.IngressBackend{
+		Spec: networking.IngressSpec{
+			Backend: &networking.IngressBackend{
 				ServiceName: "default-backend",
 				ServicePort: intstr.FromInt(80),
 			},
-			Rules: []extensions.IngressRule{
+			Rules: []networking.IngressRule{
 				{
 					Host: "foo.bar.com",
-					IngressRuleValue: extensions.IngressRuleValue{
-						HTTP: &extensions.HTTPIngressRuleValue{
-							Paths: []extensions.HTTPIngressPath{
+					IngressRuleValue: networking.IngressRuleValue{
+						HTTP: &networking.HTTPIngressRuleValue{
+							Paths: []networking.HTTPIngressPath{
 								{
 									Path:    "/foo",
 									Backend: defaultBackend,
@@ -62,37 +62,100 @@ func buildIngress() *extensions.Ingress {
 	}
 }
 
-func TestIngressCorsConfig(t *testing.T) {
+func TestIngressCorsConfigValid(t *testing.T) {
 	ing := buildIngress()
 
 	data := map[string]string{}
+
+	// Valid
 	data[parser.GetAnnotationWithPrefix("enable-cors")] = "true"
 	data[parser.GetAnnotationWithPrefix("cors-allow-headers")] = "DNT,X-CustomHeader, Keep-Alive,User-Agent"
 	data[parser.GetAnnotationWithPrefix("cors-allow-credentials")] = "false"
-	data[parser.GetAnnotationWithPrefix("cors-allow-methods")] = "PUT, GET,OPTIONS, PATCH, $nginx_version"
+	data[parser.GetAnnotationWithPrefix("cors-allow-methods")] = "GET, PATCH"
 	data[parser.GetAnnotationWithPrefix("cors-allow-origin")] = "https://origin123.test.com:4443"
+	data[parser.GetAnnotationWithPrefix("cors-max-age")] = "600"
 	ing.SetAnnotations(data)
 
-	corst, _ := NewParser(&resolver.Mock{}).Parse(ing)
+	corst, err := NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("error parsing annotations: %v", err)
+	}
+
 	nginxCors, ok := corst.(*Config)
 	if !ok {
-		t.Errorf("expected a Config type")
+		t.Errorf("expected a Config type but returned %T", corst)
 	}
 
 	if !nginxCors.CorsEnabled {
-		t.Errorf("expected cors enabled but returned %v", nginxCors.CorsEnabled)
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("enable-cors")], nginxCors.CorsEnabled)
+	}
+
+	if nginxCors.CorsAllowCredentials {
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("cors-allow-credentials")], nginxCors.CorsAllowCredentials)
 	}
 
 	if nginxCors.CorsAllowHeaders != "DNT,X-CustomHeader, Keep-Alive,User-Agent" {
-		t.Errorf("expected headers not found. Found %v", nginxCors.CorsAllowHeaders)
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("cors-allow-headers")], nginxCors.CorsAllowHeaders)
 	}
 
-	if nginxCors.CorsAllowMethods != "GET, PUT, POST, DELETE, PATCH, OPTIONS" {
-		t.Errorf("expected default methods, but got %v", nginxCors.CorsAllowMethods)
+	if nginxCors.CorsAllowMethods != "GET, PATCH" {
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("cors-allow-methods")], nginxCors.CorsAllowMethods)
 	}
 
 	if nginxCors.CorsAllowOrigin != "https://origin123.test.com:4443" {
-		t.Errorf("expected origin https://origin123.test.com:4443, but got %v", nginxCors.CorsAllowOrigin)
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("cors-allow-origin")], nginxCors.CorsAllowOrigin)
+	}
+
+	if nginxCors.CorsMaxAge != 600 {
+		t.Errorf("expected %v but returned %v", data[parser.GetAnnotationWithPrefix("cors-max-age")], nginxCors.CorsMaxAge)
+	}
+}
+
+func TestIngressCorsConfigInvalid(t *testing.T) {
+	ing := buildIngress()
+
+	data := map[string]string{}
+
+	// Invalid
+	data[parser.GetAnnotationWithPrefix("enable-cors")] = "yes"
+	data[parser.GetAnnotationWithPrefix("cors-allow-headers")] = "@alright, #ingress"
+	data[parser.GetAnnotationWithPrefix("cors-allow-credentials")] = "no"
+	data[parser.GetAnnotationWithPrefix("cors-allow-methods")] = "GET, PATCH, $nginx"
+	data[parser.GetAnnotationWithPrefix("cors-allow-origin")] = "origin123.test.com:4443"
+	data[parser.GetAnnotationWithPrefix("cors-max-age")] = "abcd"
+	ing.SetAnnotations(data)
+
+	corst, err := NewParser(&resolver.Mock{}).Parse(ing)
+	if err != nil {
+		t.Errorf("error parsing annotations: %v", err)
+	}
+
+	nginxCors, ok := corst.(*Config)
+	if !ok {
+		t.Errorf("expected a Config type but returned %T", corst)
+	}
+
+	if nginxCors.CorsEnabled {
+		t.Errorf("expected %v but returned %v", false, nginxCors.CorsEnabled)
+	}
+
+	if !nginxCors.CorsAllowCredentials {
+		t.Errorf("expected %v but returned %v", true, nginxCors.CorsAllowCredentials)
+	}
+
+	if nginxCors.CorsAllowHeaders != defaultCorsHeaders {
+		t.Errorf("expected %v but returned %v", defaultCorsHeaders, nginxCors.CorsAllowHeaders)
+	}
+
+	if nginxCors.CorsAllowMethods != defaultCorsMethods {
+		t.Errorf("expected %v but returned %v", defaultCorsMethods, nginxCors.CorsAllowMethods)
+	}
+
+	if nginxCors.CorsAllowOrigin != "*" {
+		t.Errorf("expected %v but returned %v", "*", nginxCors.CorsAllowOrigin)
+	}
+
+	if nginxCors.CorsMaxAge != defaultCorsMaxAge {
+		t.Errorf("expected %v but returned %v", defaultCorsMaxAge, nginxCors.CorsMaxAge)
+	}
+}
diff --git a/internal/ingress/annotations/customhttperrors/main.go b/internal/ingress/annotations/customhttperrors/main.go
new file mode 100644
index 0000000000..3c5fbf077b
--- /dev/null
+++ b/internal/ingress/annotations/customhttperrors/main.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package customhttperrors + +import ( + "strconv" + "strings" + + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type customhttperrors struct { + r resolver.Resolver +} + +// NewParser creates a new custom http errors annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return customhttperrors{r} +} + +// Parse parses the annotations contained in the ingress to use +// custom http errors +func (e customhttperrors) Parse(ing *networking.Ingress) (interface{}, error) { + c, err := parser.GetStringAnnotation("custom-http-errors", ing) + if err != nil { + return nil, err + } + + cSplit := strings.Split(c, ",") + var codes []int + for _, i := range cSplit { + num, err := strconv.Atoi(i) + if err != nil { + return nil, err + } + codes = append(codes, num) + } + + return codes, nil +} diff --git a/internal/ingress/annotations/customhttperrors/main_test.go b/internal/ingress/annotations/customhttperrors/main_test.go new file mode 100644 index 0000000000..3827d197fd --- /dev/null +++ b/internal/ingress/annotations/customhttperrors/main_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
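The `custom-http-errors` parser is strict: one bad entry rejects the whole list, and values are not trimmed, so `"400, 404"` with a space would fail too. A runnable sketch of the same loop:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCodes mirrors customhttperrors.Parse: a comma-separated list where a
// single unparsable entry rejects the whole annotation.
func parseCodes(c string) ([]int, error) {
	var codes []int
	for _, i := range strings.Split(c, ",") {
		num, err := strconv.Atoi(i)
		if err != nil {
			return nil, err
		}
		codes = append(codes, num)
	}
	return codes, nil
}

func main() {
	fmt.Println(parseCodes("400,404,500,502")) // [400 404 500 502] <nil>
	fmt.Println(parseCodes("400,abc"))         // nil slice plus an error
}
```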
+*/ + +package customhttperrors + +import ( + "reflect" + "sort" + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *networking.Ingress { + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + }, + } +} + +func TestParseInvalidAnnotations(t *testing.T) { + ing := buildIngress() + + _, err := NewParser(&resolver.Mock{}).Parse(ing) + if err == nil { + t.Errorf("expected error parsing ingress with custom-http-errors") + } + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("custom-http-errors")] = "400,404,abc,502" + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err == nil { + t.Errorf("expected error parsing ingress with custom-http-errors") + } + if i != nil { + t.Errorf("expected %v but got %v", nil, i) + } +} + +func TestParseAnnotations(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("custom-http-errors")] = "400,404,500,502" + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error parsing ingress with custom-http-errors") + } + val, ok := i.([]int) + if !ok { + t.Errorf("expected a []int type") + } + + expected := []int{400, 404, 500, 502} + sort.Ints(val) + + if !reflect.DeepEqual(expected, val) { + t.Errorf("expected %v but got %v", expected, val) + } +} diff --git a/internal/ingress/annotations/defaultbackend/main.go b/internal/ingress/annotations/defaultbackend/main.go index 8b4112a3e1..ff4f41ce3f 100644 --- a/internal/ingress/annotations/defaultbackend/main.go +++ b/internal/ingress/annotations/defaultbackend/main.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/pkg/errors" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -37,7 +37,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress to use // a custom default backend -func (db backend) Parse(ing *extensions.Ingress) (interface{}, error) { +func (db backend) Parse(ing *networking.Ingress) (interface{}, error) { s, err := parser.GetStringAnnotation("default-backend", ing) if err != nil { return nil, err diff --git a/internal/ingress/annotations/defaultbackend/main_test.go b/internal/ingress/annotations/defaultbackend/main_test.go new file mode 100644 index 0000000000..9278602157 --- /dev/null +++ b/internal/ingress/annotations/defaultbackend/main_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package defaultbackend + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +type mockService struct { + resolver.Mock +} + +// GetService mocks the GetService call from the defaultbackend package +func (m mockService) GetService(name string) (*api.Service, error) { + if name != "default/demo-service" { + return nil, errors.Errorf("there is no service with name %v", name) + } + + return &api.Service{ + ObjectMeta: meta_v1.ObjectMeta{ + Namespace: api.NamespaceDefault, + Name: "demo-service", + }, + }, nil +} + +func TestAnnotations(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("default-backend")] = "demo-service" + ing.SetAnnotations(data) + + fakeService := &mockService{} + i, err := NewParser(fakeService).Parse(ing) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + svc, ok := i.(*api.Service) + if !ok { + t.Errorf("expected *api.Service but got %v", svc) + } + if svc.Name != "demo-service" { + t.Errorf("expected %v but got %v", "demo-service", svc.Name) + } +} diff --git a/internal/ingress/annotations/healthcheck/main.go b/internal/ingress/annotations/healthcheck/main.go deleted file mode 100644 index 24c65e0a28..0000000000 --- a/internal/ingress/annotations/healthcheck/main.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
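Both `mockSecret` (authtls) and `mockService` (defaultbackend) use the same pattern: embed `resolver.Mock` to satisfy the whole `Resolver` interface and override only the method under test. The same shape works for any other resolver call; `mockBackend` here is hypothetical:

```go
package example

import (
	"k8s.io/ingress-nginx/internal/ingress/defaults"
	"k8s.io/ingress-nginx/internal/ingress/resolver"
)

// mockBackend is a hypothetical mock following the same embedding trick
// as mockSecret and mockService above.
type mockBackend struct {
	resolver.Mock // supplies stub implementations of every other Resolver method
}

// Override only the call the test exercises.
func (m mockBackend) GetDefaultBackend() defaults.Backend {
	return defaults.Backend{UpstreamMaxFails: 2}
}
```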
-*/ - -package healthcheck - -import ( - extensions "k8s.io/api/extensions/v1beta1" - - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "k8s.io/ingress-nginx/internal/ingress/resolver" -) - -// Config returns the URL and method to use check the status of -// the upstream server/s -type Config struct { - MaxFails int `json:"maxFails"` - FailTimeout int `json:"failTimeout"` -} - -type healthCheck struct { - r resolver.Resolver -} - -// NewParser creates a new health check annotation parser -func NewParser(r resolver.Resolver) parser.IngressAnnotation { - return healthCheck{r} -} - -// ParseAnnotations parses the annotations contained in the ingress -// rule used to configure upstream check parameters -func (hc healthCheck) Parse(ing *extensions.Ingress) (interface{}, error) { - defBackend := hc.r.GetDefaultBackend() - if ing.GetAnnotations() == nil { - return &Config{defBackend.UpstreamMaxFails, defBackend.UpstreamFailTimeout}, nil - } - - mf, err := parser.GetIntAnnotation("upstream-max-fails", ing) - if err != nil { - mf = defBackend.UpstreamMaxFails - } - - ft, err := parser.GetIntAnnotation("upstream-fail-timeout", ing) - if err != nil { - ft = defBackend.UpstreamFailTimeout - } - - return &Config{mf, ft}, nil -} diff --git a/internal/ingress/annotations/healthcheck/main_test.go b/internal/ingress/annotations/healthcheck/main_test.go deleted file mode 100644 index 6a407f2735..0000000000 --- a/internal/ingress/annotations/healthcheck/main_test.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package healthcheck - -import ( - "testing" - - api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "k8s.io/ingress-nginx/internal/ingress/defaults" - "k8s.io/ingress-nginx/internal/ingress/resolver" -) - -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ - ServiceName: "default-backend", - ServicePort: intstr.FromInt(80), - } - - return &extensions.Ingress{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "foo", - Namespace: api.NamespaceDefault, - }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ - ServiceName: "default-backend", - ServicePort: intstr.FromInt(80), - }, - Rules: []extensions.IngressRule{ - { - Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ - { - Path: "/foo", - Backend: defaultBackend, - }, - }, - }, - }, - }, - }, - }, - } -} - -type mockBackend struct { - resolver.Mock -} - -func (m mockBackend) GetDefaultBackend() defaults.Backend { - return defaults.Backend{UpstreamFailTimeout: 1} -} - -func TestIngressHealthCheck(t *testing.T) { - ing := buildIngress() - - data := map[string]string{} - data[parser.GetAnnotationWithPrefix("upstream-max-fails")] = "2" - ing.SetAnnotations(data) - - hzi, _ := NewParser(mockBackend{}).Parse(ing) - nginxHz, ok := hzi.(*Config) - if !ok { - t.Errorf("expected a Upstream type") - } - - if nginxHz.MaxFails != 2 { - t.Errorf("expected 2 as max-fails but returned %v", nginxHz.MaxFails) - } - - if nginxHz.FailTimeout != 1 { - t.Errorf("expected 0 as fail-timeout but returned %v", nginxHz.FailTimeout) - } -} diff --git a/internal/ingress/annotations/http2pushpreload/main.go b/internal/ingress/annotations/http2pushpreload/main.go new file mode 100644 index 0000000000..c542f03cf5 --- /dev/null +++ b/internal/ingress/annotations/http2pushpreload/main.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http2pushpreload + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type http2PushPreload struct { + r resolver.Resolver +} + +// NewParser creates a new http2PushPreload annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return http2PushPreload{r} +} + +// Parse parses the annotations contained in the ingress rule +// used to add http2 push preload to the server +func (h2pp http2PushPreload) Parse(ing *networking.Ingress) (interface{}, error) { + return parser.GetBoolAnnotation("http2-push-preload", ing) +} diff --git a/internal/ingress/annotations/http2pushpreload/main_test.go b/internal/ingress/annotations/http2pushpreload/main_test.go new file mode 100644 index 0000000000..6b24ecfaee --- /dev/null +++ b/internal/ingress/annotations/http2pushpreload/main_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http2pushpreload + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + annotation := parser.GetAnnotationWithPrefix("http2-push-preload") + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected bool + }{ + {map[string]string{annotation: "true"}, true}, + {map[string]string{annotation: "1"}, true}, + {map[string]string{annotation: ""}, false}, + {map[string]string{}, false}, + {nil, false}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/influxdb/main.go b/internal/ingress/annotations/influxdb/main.go new file mode 100644 index 0000000000..cec014b897 --- /dev/null +++ b/internal/ingress/annotations/influxdb/main.go @@ -0,0 +1,101 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
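`http2-push-preload` is handed straight to `parser.GetBoolAnnotation`, so the accepted spellings come from Go's bool parsing. A sketch assuming `GetBoolAnnotation` follows `strconv.ParseBool` semantics, which matches the test table above:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "true" and "1" parse as true; an empty value fails to parse,
	// so the annotation is treated as false — consistent with the tests.
	for _, raw := range []string{"true", "1", ""} {
		v, err := strconv.ParseBool(raw)
		fmt.Printf("%q -> %v (err: %v)\n", raw, v, err)
	}
}
```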
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package influxdb
+
+import (
+	networking "k8s.io/api/networking/v1beta1"
+
+	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+	"k8s.io/ingress-nginx/internal/ingress/resolver"
+)
+
+type influxdb struct {
+	r resolver.Resolver
+}
+
+// Config contains the InfluxDB configuration to be used in the Ingress
+type Config struct {
+	InfluxDBEnabled     bool   `json:"influxDBEnabled"`
+	InfluxDBMeasurement string `json:"influxDBMeasurement"`
+	InfluxDBPort        string `json:"influxDBPort"`
+	InfluxDBHost        string `json:"influxDBHost"`
+	InfluxDBServerName  string `json:"influxDBServerName"`
+}
+
+// NewParser creates a new InfluxDB annotation parser
+func NewParser(r resolver.Resolver) parser.IngressAnnotation {
+	return influxdb{r}
+}
+
+// Parse parses the annotations to look for InfluxDB configurations
+func (c influxdb) Parse(ing *networking.Ingress) (interface{}, error) {
+	var err error
+	config := &Config{}
+
+	config.InfluxDBEnabled, err = parser.GetBoolAnnotation("enable-influxdb", ing)
+	if err != nil {
+		config.InfluxDBEnabled = false
+	}
+
+	config.InfluxDBMeasurement, err = parser.GetStringAnnotation("influxdb-measurement", ing)
+	if err != nil {
+		config.InfluxDBMeasurement = "default"
+	}
+
+	config.InfluxDBPort, err = parser.GetStringAnnotation("influxdb-port", ing)
+	if err != nil {
+		// This is not the default 8086 port; it is the port usually used to expose
+		// InfluxDB over UDP, since the module talks to influx via the UDP line protocol.
+		config.InfluxDBPort = "8089"
+	}
+
+	config.InfluxDBHost, err = parser.GetStringAnnotation("influxdb-host", ing)
+	if err != nil {
+		config.InfluxDBHost = "127.0.0.1"
+	}
+
+	config.InfluxDBServerName, err = parser.GetStringAnnotation("influxdb-server-name", ing)
+	if err != nil {
+		config.InfluxDBServerName = "nginx-ingress"
+	}
+
+	return config, nil
+}
+
+// Equal tests for equality between two Config types
+func (e1 *Config) Equal(e2 *Config) bool {
+	if e1 == e2 {
+		return true
+	}
+	if e1 == nil || e2 == nil {
+		return false
+	}
+	if e1.InfluxDBEnabled != e2.InfluxDBEnabled {
+		return false
+	}
+	if e1.InfluxDBMeasurement != e2.InfluxDBMeasurement {
+		return false
+	}
+	if e1.InfluxDBPort != e2.InfluxDBPort {
+		return false
+	}
+	if e1.InfluxDBHost != e2.InfluxDBHost {
+		return false
+	}
+	if e1.InfluxDBServerName != e2.InfluxDBServerName {
+		return false
+	}
+
+	return true
+}
diff --git a/internal/ingress/annotations/influxdb/main_test.go b/internal/ingress/annotations/influxdb/main_test.go
new file mode 100644
index 0000000000..97ba149638
--- /dev/null
+++ b/internal/ingress/annotations/influxdb/main_test.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
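For reference, this is the fully-defaulted configuration `influxdb.Parse` produces when only `enable-influxdb` is set; values are taken from the fall-back branches above (note `8089` is the assumed UDP line-protocol listener, not InfluxDB's 8086 HTTP API):

```go
package main

import "fmt"

// Config mirrors the struct above; defaults taken from Parse's fall-back branches.
type Config struct {
	InfluxDBEnabled     bool
	InfluxDBMeasurement string
	InfluxDBPort        string
	InfluxDBHost        string
	InfluxDBServerName  string
}

func main() {
	cfg := Config{
		InfluxDBEnabled:     true,
		InfluxDBMeasurement: "default",
		InfluxDBPort:        "8089", // UDP line-protocol listener, not the 8086 HTTP API
		InfluxDBHost:        "127.0.0.1",
		InfluxDBServerName:  "nginx-ingress",
	}
	fmt.Printf("%+v\n", cfg)
}
```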
+*/ + +package influxdb + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestIngressInvalidInfluxDB(t *testing.T) { + ing := buildIngress() + + influx, _ := NewParser(&resolver.Mock{}).Parse(ing) + nginxInflux, ok := influx.(*Config) + if !ok { + t.Errorf("expected a Config type") + } + + if nginxInflux.InfluxDBEnabled == true { + t.Errorf("expected influxdb enabled but returned %v", nginxInflux.InfluxDBEnabled) + } + + if nginxInflux.InfluxDBMeasurement != "default" { + t.Errorf("expected measurement name not found. Found %v", nginxInflux.InfluxDBMeasurement) + } + + if nginxInflux.InfluxDBPort != "8089" { + t.Errorf("expected port not found. Found %v", nginxInflux.InfluxDBPort) + } + + if nginxInflux.InfluxDBHost != "127.0.0.1" { + t.Errorf("expected host not found. Found %v", nginxInflux.InfluxDBHost) + } + + if nginxInflux.InfluxDBServerName != "nginx-ingress" { + t.Errorf("expected server name not found. Found %v", nginxInflux.InfluxDBServerName) + } +} + +func TestIngressInfluxDB(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("enable-influxdb")] = "true" + data[parser.GetAnnotationWithPrefix("influxdb-measurement")] = "nginxmeasures" + data[parser.GetAnnotationWithPrefix("influxdb-port")] = "9091" + data[parser.GetAnnotationWithPrefix("influxdb-host")] = "10.99.0.13" + data[parser.GetAnnotationWithPrefix("influxdb-server-name")] = "nginx-test-1" + ing.SetAnnotations(data) + + influx, _ := NewParser(&resolver.Mock{}).Parse(ing) + nginxInflux, ok := influx.(*Config) + if !ok { + t.Errorf("expected a Config type") + } + + if !nginxInflux.InfluxDBEnabled { + t.Errorf("expected influxdb enabled but returned %v", nginxInflux.InfluxDBEnabled) + } + + if nginxInflux.InfluxDBMeasurement != "nginxmeasures" { + t.Errorf("expected measurement name not found. Found %v", nginxInflux.InfluxDBMeasurement) + } + + if nginxInflux.InfluxDBPort != "9091" { + t.Errorf("expected port not found. Found %v", nginxInflux.InfluxDBPort) + } + + if nginxInflux.InfluxDBHost != "10.99.0.13" { + t.Errorf("expected host not found. Found %v", nginxInflux.InfluxDBHost) + } + + if nginxInflux.InfluxDBServerName != "nginx-test-1" { + t.Errorf("expected server name not found. 
Found %v", nginxInflux.InfluxDBServerName) + } +} diff --git a/internal/ingress/annotations/ipwhitelist/main.go b/internal/ingress/annotations/ipwhitelist/main.go index 0d76a2e00a..dc743a8d66 100644 --- a/internal/ingress/annotations/ipwhitelist/main.go +++ b/internal/ingress/annotations/ipwhitelist/main.go @@ -22,17 +22,18 @@ import ( "github.com/pkg/errors" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/net" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" ing_errors "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" + "k8s.io/ingress-nginx/internal/sets" ) // SourceRange returns the CIDR type SourceRange struct { - CIDR []string `json:"cidr,omitEmpty"` + CIDR []string `json:"cidr,omitempty"` } // Equal tests for equality between two SourceRange types @@ -44,23 +45,11 @@ func (sr1 *SourceRange) Equal(sr2 *SourceRange) bool { return false } - if len(sr1.CIDR) != len(sr2.CIDR) { + match := sets.StringElementsMatch(sr1.CIDR, sr2.CIDR) + if !match { return false } - for _, s1l := range sr1.CIDR { - found := false - for _, sl2 := range sr2.CIDR { - if s1l == sl2 { - found = true - break - } - } - if !found { - return false - } - } - return true } @@ -77,7 +66,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // rule used to limit access to certain client addresses or networks. // Multiple ranges can specified using commas as separator // e.g. `18.0.0.0/8,56.0.0.0/8` -func (a ipwhitelist) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a ipwhitelist) Parse(ing *networking.Ingress) (interface{}, error) { defBackend := a.r.GetDefaultBackend() sort.Strings(defBackend.WhitelistSourceRange) diff --git a/internal/ingress/annotations/ipwhitelist/main_test.go b/internal/ingress/annotations/ipwhitelist/main_test.go index 3ffc599594..43aef75735 100644 --- a/internal/ingress/annotations/ipwhitelist/main_test.go +++ b/internal/ingress/annotations/ipwhitelist/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" @@ -28,28 +28,28 @@ import ( "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, diff --git a/internal/ingress/annotations/loadbalancing/main.go b/internal/ingress/annotations/loadbalancing/main.go new file mode 
100644
index 0000000000..ddae0ccbe5
--- /dev/null
+++ b/internal/ingress/annotations/loadbalancing/main.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancing
+
+import (
+	networking "k8s.io/api/networking/v1beta1"
+
+	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+	"k8s.io/ingress-nginx/internal/ingress/resolver"
+)
+
+type loadbalancing struct {
+	r resolver.Resolver
+}
+
+// NewParser creates a new load-balance annotation parser
+func NewParser(r resolver.Resolver) parser.IngressAnnotation {
+	return loadbalancing{r}
+}
+
+// Parse parses the annotations contained in the ingress rule
+// used to indicate the load balancing algorithm for the location/s
+func (a loadbalancing) Parse(ing *networking.Ingress) (interface{}, error) {
+	return parser.GetStringAnnotation("load-balance", ing)
+}
diff --git a/internal/ingress/annotations/loadbalancing/main_test.go b/internal/ingress/annotations/loadbalancing/main_test.go
new file mode 100644
index 0000000000..bbda797154
--- /dev/null
+++ b/internal/ingress/annotations/loadbalancing/main_test.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
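A minimal sketch of exercising the new parser from inside this repo (the `internal` packages are not importable from outside); `ewma` is only an example value, since the parser itself does not validate the algorithm name:

```go
package main

import (
	"fmt"

	networking "k8s.io/api/networking/v1beta1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/ingress-nginx/internal/ingress/annotations/loadbalancing"
	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
	"k8s.io/ingress-nginx/internal/ingress/resolver"
)

func main() {
	ing := &networking.Ingress{
		ObjectMeta: meta_v1.ObjectMeta{Name: "foo", Namespace: "default"},
	}
	ing.SetAnnotations(map[string]string{
		parser.GetAnnotationWithPrefix("load-balance"): "ewma",
	})

	// The parser passes the annotation value through unvalidated.
	algo, _ := loadbalancing.NewParser(&resolver.Mock{}).Parse(ing)
	fmt.Println(algo) // "ewma"
}
```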
+*/ + +package loadbalancing + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + annotation := parser.GetAnnotationWithPrefix("load-balance") + + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "ip_hash"}, "ip_hash"}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/log/main.go b/internal/ingress/annotations/log/main.go new file mode 100644 index 0000000000..6cf99d9c8b --- /dev/null +++ b/internal/ingress/annotations/log/main.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type log struct { + r resolver.Resolver +} + +// Config contains the configuration to be used in the Ingress +type Config struct { + Access bool `json:"accessLog"` + Rewrite bool `json:"rewriteLog"` +} + +// Equal tests for equality between two Config types +func (bd1 *Config) Equal(bd2 *Config) bool { + if bd1.Access != bd2.Access { + return false + } + + if bd1.Rewrite != bd2.Rewrite { + return false + } + + return true +} + +// NewParser creates a new log annotations parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return log{r} +} + +// Parse parses the annotations contained in the ingress +// rule used to indicate if the location/s should enable logs +func (l log) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} + + config.Access, err = parser.GetBoolAnnotation("enable-access-log", ing) + if err != nil { + config.Access = true + } + + config.Rewrite, err = parser.GetBoolAnnotation("enable-rewrite-log", ing) + if err != nil { + config.Rewrite = false + } + + return config, nil +} diff --git a/internal/ingress/annotations/log/main_test.go b/internal/ingress/annotations/log/main_test.go new file mode 100644 index 0000000000..068b1be16c --- /dev/null +++ b/internal/ingress/annotations/log/main_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestIngressAccessLogConfig(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("enable-access-log")] = "false" + ing.SetAnnotations(data) + + log, _ := NewParser(&resolver.Mock{}).Parse(ing) + nginxLogs, ok := log.(*Config) + if !ok { + t.Errorf("expected a Config type") + } + + if nginxLogs.Access { + t.Errorf("expected access log to be disabled but it is enabled") + } +} + +func TestIngressRewriteLogConfig(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("enable-rewrite-log")] = "true" + ing.SetAnnotations(data) + + log, _ := NewParser(&resolver.Mock{}).Parse(ing) + nginxLogs, ok := log.(*Config) + if !ok { + t.Errorf("expected a Config type") + } + + if !nginxLogs.Rewrite { + t.Errorf("expected rewrite log to be enabled but it is disabled") + } +} diff --git a/internal/ingress/annotations/luarestywaf/main.go b/internal/ingress/annotations/luarestywaf/main.go new file mode 100644 index 0000000000..76b59ef8ee --- /dev/null +++ b/internal/ingress/annotations/luarestywaf/main.go @@ -0,0 +1,126 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
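The log parser above deliberately defaults access logging to enabled and rewrite logging to disabled whenever an annotation is missing or malformed (GetBoolAnnotation returns an error in both cases). A sketch of a test pinning down that default, reusing buildIngress from the test file above; the test name is hypothetical:

func TestIngressLogDefaultsSketch(t *testing.T) {
	ing := buildIngress()
	// Only the rewrite-log annotation is set; enable-access-log is
	// absent, so GetBoolAnnotation fails and Access keeps its default.
	ing.SetAnnotations(map[string]string{
		parser.GetAnnotationWithPrefix("enable-rewrite-log"): "true",
	})

	cfg, _ := NewParser(&resolver.Mock{}).Parse(ing)
	logCfg, ok := cfg.(*Config)
	if !ok {
		t.Fatalf("expected a Config type")
	}
	if !logCfg.Access || !logCfg.Rewrite {
		t.Errorf("expected Access=true (default) and Rewrite=true, got %+v", logCfg)
	}
}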
+*/ + +package luarestywaf + +import ( + "strings" + + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" + "k8s.io/ingress-nginx/internal/sets" +) + +var luaRestyWAFModes = map[string]bool{"ACTIVE": true, "INACTIVE": true, "SIMULATE": true} + +// Config returns lua-resty-waf configuration for an Ingress rule +type Config struct { + Mode string `json:"mode"` + Debug bool `json:"debug"` + IgnoredRuleSets []string `json:"ignored-rulesets"` + ExtraRulesetString string `json:"extra-ruleset-string"` + ScoreThreshold int `json:"score-threshold"` + AllowUnknownContentTypes bool `json:"allow-unknown-content-types"` + ProcessMultipartBody bool `json:"process-multipart-body"` +} + +// Equal tests for equality between two Config types +func (e1 *Config) Equal(e2 *Config) bool { + if e1 == e2 { + return true + } + if e1 == nil || e2 == nil { + return false + } + if e1.Mode != e2.Mode { + return false + } + if e1.Debug != e2.Debug { + return false + } + + match := sets.StringElementsMatch(e1.IgnoredRuleSets, e2.IgnoredRuleSets) + if !match { + return false + } + + if e1.ExtraRulesetString != e2.ExtraRulesetString { + return false + } + if e1.ScoreThreshold != e2.ScoreThreshold { + return false + } + if e1.AllowUnknownContentTypes != e2.AllowUnknownContentTypes { + return false + } + if e1.ProcessMultipartBody != e2.ProcessMultipartBody { + return false + } + + return true +} + +type luarestywaf struct { + r resolver.Resolver +} + +// NewParser creates a new lua-resty-waf annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return luarestywaf{r} +} + +// Parse parses the annotations contained in the ingress rule +// used to configure lua-resty-waf for the location/s defined +// by the rule's paths +func (a luarestywaf) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} + + mode, err := parser.GetStringAnnotation("lua-resty-waf", ing) + if err != nil { + return &Config{}, err + } + + config.Mode = strings.ToUpper(mode) + if _, ok := luaRestyWAFModes[config.Mode]; !ok { + return &Config{}, errors.NewInvalidAnnotationContent("lua-resty-waf", mode) + } + + config.Debug, _ = parser.GetBoolAnnotation("lua-resty-waf-debug", ing) + + ignoredRuleSetsStr, _ := parser.GetStringAnnotation("lua-resty-waf-ignore-rulesets", ing) + config.IgnoredRuleSets = strings.FieldsFunc(ignoredRuleSetsStr, func(c rune) bool { + strC := string(c) + return strC == "," || strC == " " + }) + + // TODO(elvinefendi) maybe validate the ruleset string here + config.ExtraRulesetString, _ = parser.GetStringAnnotation("lua-resty-waf-extra-rules", ing) + + config.ScoreThreshold, _ = parser.GetIntAnnotation("lua-resty-waf-score-threshold", ing) + + config.AllowUnknownContentTypes, _ = parser.GetBoolAnnotation("lua-resty-waf-allow-unknown-content-types", ing) + + config.ProcessMultipartBody, err = parser.GetBoolAnnotation("lua-resty-waf-process-multipart-body", ing) + if err != nil { + config.ProcessMultipartBody = true + } + + return config, nil +} diff --git a/internal/ingress/annotations/luarestywaf/main_test.go b/internal/ingress/annotations/luarestywaf/main_test.go new file mode 100644 index 0000000000..cdad6a1c3f --- /dev/null +++ b/internal/ingress/annotations/luarestywaf/main_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package luarestywaf + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + luaRestyWAFAnnotation := parser.GetAnnotationWithPrefix("lua-resty-waf") + luaRestyWAFDebugAnnotation := parser.GetAnnotationWithPrefix("lua-resty-waf-debug") + luaRestyWAFIgnoredRuleSetsAnnotation := parser.GetAnnotationWithPrefix("lua-resty-waf-ignore-rulesets") + luaRestyWAFScoreThresholdAnnotation := parser.GetAnnotationWithPrefix("lua-resty-waf-score-threshold") + luaRestyWAFAllowUnknownContentTypesAnnotation := parser.GetAnnotationWithPrefix("lua-resty-waf-allow-unknown-content-types") + luaRestyWAFProcessMultipartBody := parser.GetAnnotationWithPrefix("lua-resty-waf-process-multipart-body") + + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected *Config + }{ + {nil, &Config{}}, + {map[string]string{}, &Config{}}, + + {map[string]string{luaRestyWAFAnnotation: "active"}, &Config{Mode: "ACTIVE", Debug: false, IgnoredRuleSets: []string{}, ProcessMultipartBody: true}}, + {map[string]string{luaRestyWAFDebugAnnotation: "true"}, &Config{Debug: false}}, + + {map[string]string{luaRestyWAFAnnotation: "active", luaRestyWAFDebugAnnotation: "true"}, &Config{Mode: "ACTIVE", Debug: true, IgnoredRuleSets: []string{}, ProcessMultipartBody: true}}, + {map[string]string{luaRestyWAFAnnotation: "active", luaRestyWAFDebugAnnotation: "false"}, &Config{Mode: "ACTIVE", Debug: false, IgnoredRuleSets: []string{}, ProcessMultipartBody: true}}, + {map[string]string{luaRestyWAFAnnotation: "inactive", luaRestyWAFDebugAnnotation: "true"}, &Config{Mode: "INACTIVE", Debug: true, IgnoredRuleSets: []string{}, ProcessMultipartBody: true}}, + + {map[string]string{ + luaRestyWAFAnnotation: "active", + luaRestyWAFDebugAnnotation: "true", + luaRestyWAFIgnoredRuleSetsAnnotation: "ruleset1, ruleset2 ruleset3, another.ruleset", + luaRestyWAFScoreThresholdAnnotation: "10", + luaRestyWAFAllowUnknownContentTypesAnnotation: "true"}, + &Config{Mode: "ACTIVE", Debug: true, IgnoredRuleSets: []string{"ruleset1", "ruleset2", "ruleset3", "another.ruleset"}, ScoreThreshold: 10, AllowUnknownContentTypes: true, ProcessMultipartBody: true}}, + + {map[string]string{luaRestyWAFAnnotation: "siMulate", luaRestyWAFDebugAnnotation: "true"}, &Config{Mode: "SIMULATE", Debug: true, IgnoredRuleSets: []string{}, ProcessMultipartBody: true}}, + {map[string]string{luaRestyWAFAnnotation: "siMulateX", luaRestyWAFDebugAnnotation: "true"}, &Config{Debug: false}}, + + {map[string]string{luaRestyWAFAnnotation: "active", luaRestyWAFProcessMultipartBody: "false"}, &Config{Mode: "ACTIVE", ProcessMultipartBody: false, IgnoredRuleSets: []string{}}}, + } + + ing := 
&networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + config := result.(*Config) + if !config.Equal(testCase.expected) { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/modsecurity/main.go b/internal/ingress/annotations/modsecurity/main.go new file mode 100644 index 0000000000..85fd71573e --- /dev/null +++ b/internal/ingress/annotations/modsecurity/main.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modsecurity + +import ( + networking "k8s.io/api/networking/v1beta1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +// Config contains ModSecurity Configuration items +type Config struct { + Enable bool `json:"enable-modsecurity"` + OWASPRules bool `json:"enable-owasp-core-rules"` + TransactionID string `json:"modsecurity-transaction-id"` + Snippet string `json:"modsecurity-snippet"` +} + +// Equal tests for equality between two Config types +func (modsec1 *Config) Equal(modsec2 *Config) bool { + if modsec1 == modsec2 { + return true + } + if modsec1 == nil || modsec2 == nil { + return false + } + if modsec1.Enable != modsec2.Enable { + return false + } + if modsec1.OWASPRules != modsec2.OWASPRules { + return false + } + if modsec1.TransactionID != modsec2.TransactionID { + return false + } + if modsec1.Snippet != modsec2.Snippet { + return false + } + + return true +} + +// NewParser creates a new ModSecurity annotation parser +func NewParser(resolver resolver.Resolver) parser.IngressAnnotation { + return modSecurity{resolver} +} + +type modSecurity struct { + r resolver.Resolver +} + +// Parse parses the annotations contained in the ingress +// rule used to enable ModSecurity in a particular location +func (a modSecurity) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} + + config.Enable, err = parser.GetBoolAnnotation("enable-modsecurity", ing) + if err != nil { + config.Enable = false + } + + config.OWASPRules, err = parser.GetBoolAnnotation("enable-owasp-core-rules", ing) + if err != nil { + config.OWASPRules = false + } + + config.TransactionID, err = parser.GetStringAnnotation("modsecurity-transaction-id", ing) + if err != nil { + config.TransactionID = "" + } + + config.Snippet, err = parser.GetStringAnnotation("modsecurity-snippet", ing) + if err != nil { + config.Snippet = "" + } + + return config, nil +} diff --git a/internal/ingress/annotations/modsecurity/main_test.go b/internal/ingress/annotations/modsecurity/main_test.go new file mode 100644 index 0000000000..e609c8b00c --- /dev/null +++ b/internal/ingress/annotations/modsecurity/main_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modsecurity + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + enable := parser.GetAnnotationWithPrefix("enable-modsecurity") + owasp := parser.GetAnnotationWithPrefix("enable-owasp-core-rules") + transID := parser.GetAnnotationWithPrefix("modsecurity-transaction-id") + snippet := parser.GetAnnotationWithPrefix("modsecurity-snippet") + + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected Config + }{ + {map[string]string{enable: "true"}, Config{true, false, "", ""}}, + {map[string]string{enable: "false"}, Config{false, false, "", ""}}, + {map[string]string{enable: ""}, Config{false, false, "", ""}}, + + {map[string]string{owasp: "true"}, Config{false, true, "", ""}}, + {map[string]string{owasp: "false"}, Config{false, false, "", ""}}, + {map[string]string{owasp: ""}, Config{false, false, "", ""}}, + + {map[string]string{transID: "ok"}, Config{false, false, "ok", ""}}, + {map[string]string{transID: ""}, Config{false, false, "", ""}}, + + {map[string]string{snippet: "ModSecurity Rule"}, Config{false, false, "", "ModSecurity Rule"}}, + {map[string]string{snippet: ""}, Config{false, false, "", ""}}, + + {map[string]string{}, Config{false, false, "", ""}}, + {nil, Config{false, false, "", ""}}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + config := result.(*Config) + if !config.Equal(&testCase.expected) { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/parser/main.go b/internal/ingress/annotations/parser/main.go index f167e83b65..9ad8dde64b 100644 --- a/internal/ingress/annotations/parser/main.go +++ b/internal/ingress/annotations/parser/main.go @@ -19,8 +19,9 @@ package parser import ( "fmt" "strconv" + "strings" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/errors" ) @@ -32,7 +33,7 @@ var ( // IngressAnnotation has a method to parse annotations located in Ingress type IngressAnnotation interface { - Parse(ing *extensions.Ingress) (interface{}, error) + Parse(ing *networking.Ingress) (interface{}, error) } type ingAnnotations map[string]string @@ -52,7 +53,12 @@ func (a ingAnnotations) parseBool(name string) (bool, error) { func (a ingAnnotations) parseString(name string) (string, error) { val, ok := a[name] if ok { - return val, nil + s := 
normalizeString(val) + if len(s) == 0 { + return "", errors.NewInvalidAnnotationContent(name, val) + } + + return s, nil } return "", errors.ErrMissingAnnotations } @@ -69,7 +75,7 @@ func (a ingAnnotations) parseInt(name string) (int, error) { return 0, errors.ErrMissingAnnotations } -func checkAnnotation(name string, ing *extensions.Ingress) error { +func checkAnnotation(name string, ing *networking.Ingress) error { if ing == nil || len(ing.GetAnnotations()) == 0 { return errors.ErrMissingAnnotations } @@ -81,7 +87,7 @@ func checkAnnotation(name string, ing *extensions.Ingress) error { } // GetBoolAnnotation extracts a boolean from an Ingress annotation -func GetBoolAnnotation(name string, ing *extensions.Ingress) (bool, error) { +func GetBoolAnnotation(name string, ing *networking.Ingress) (bool, error) { v := GetAnnotationWithPrefix(name) err := checkAnnotation(v, ing) if err != nil { @@ -91,17 +97,18 @@ func GetBoolAnnotation(name string, ing *extensions.Ingress) (bool, error) { } // GetStringAnnotation extracts a string from an Ingress annotation -func GetStringAnnotation(name string, ing *extensions.Ingress) (string, error) { +func GetStringAnnotation(name string, ing *networking.Ingress) (string, error) { v := GetAnnotationWithPrefix(name) err := checkAnnotation(v, ing) if err != nil { return "", err } + return ingAnnotations(ing.GetAnnotations()).parseString(v) } // GetIntAnnotation extracts an int from an Ingress annotation -func GetIntAnnotation(name string, ing *extensions.Ingress) (int, error) { +func GetIntAnnotation(name string, ing *networking.Ingress) (int, error) { v := GetAnnotationWithPrefix(name) err := checkAnnotation(v, ing) if err != nil { @@ -114,3 +121,12 @@ func GetIntAnnotation(name string, ing *extensions.Ingress) (int, error) { func GetAnnotationWithPrefix(suffix string) string { return fmt.Sprintf("%v/%v", AnnotationsPrefix, suffix) } + +func normalizeString(input string) string { + trimmedContent := []string{} + for _, line := range strings.Split(input, "\n") { + trimmedContent = append(trimmedContent, strings.TrimSpace(line)) + } + + return strings.Join(trimmedContent, "\n") +} diff --git a/internal/ingress/annotations/parser/main_test.go b/internal/ingress/annotations/parser/main_test.go index f65fbdbad2..a89f3b16ae 100644 --- a/internal/ingress/annotations/parser/main_test.go +++ b/internal/ingress/annotations/parser/main_test.go @@ -20,17 +20,17 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func buildIngress() *extensions.Ingress { - return &extensions.Ingress{ +func buildIngress() *networking.Ingress { + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{}, + Spec: networking.IngressSpec{}, } } @@ -79,7 +79,7 @@ func TestGetStringAnnotation(t *testing.T) { _, err := GetStringAnnotation("", nil) if err == nil { - t.Errorf("expected error but retuned nil") + t.Errorf("expected error but none returned") } tests := []struct { @@ -89,8 +89,17 @@ func TestGetStringAnnotation(t *testing.T) { exp string expErr bool }{ - {"valid - A", "string", "A", "A", false}, - {"valid - B", "string", "B", "B", false}, + {"valid - A", "string", "A ", "A", false}, + {"valid - B", "string", " B", "B", false}, + {"empty", "string", " ", "", true}, + {"valid multiline", "string", ` + rewrite (?i)/arcgis/rest/services/Utilities/Geometry/GeometryServer(.*)$ 
/arcgis/rest/services/Utilities/Geometry/GeometryServer$1 break; + rewrite (?i)/arcgis/services/Utilities/Geometry/GeometryServer(.*)$ /arcgis/services/Utilities/Geometry/GeometryServer$1 break; + `, ` +rewrite (?i)/arcgis/rest/services/Utilities/Geometry/GeometryServer(.*)$ /arcgis/rest/services/Utilities/Geometry/GeometryServer$1 break; +rewrite (?i)/arcgis/services/Utilities/Geometry/GeometryServer(.*)$ /arcgis/services/Utilities/Geometry/GeometryServer$1 break; +`, + false}, } data := map[string]string{} @@ -102,7 +111,7 @@ func TestGetStringAnnotation(t *testing.T) { s, err := GetStringAnnotation(test.field, ing) if test.expErr { if err == nil { - t.Errorf("%v: expected error but retuned nil", test.name) + t.Errorf("%v: expected error but none returned", test.name) } continue } diff --git a/internal/ingress/annotations/portinredirect/main.go b/internal/ingress/annotations/portinredirect/main.go index c091f1530d..bb5925c31b 100644 --- a/internal/ingress/annotations/portinredirect/main.go +++ b/internal/ingress/annotations/portinredirect/main.go @@ -17,7 +17,7 @@ limitations under the License. package portinredirect import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -34,7 +34,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress // rule used to indicate if the redirects must include the port in use -func (a portInRedirect) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a portInRedirect) Parse(ing *networking.Ingress) (interface{}, error) { up, err := parser.GetBoolAnnotation("use-port-in-redirects", ing) if err != nil { return a.r.GetDefaultBackend().UsePortInRedirects, nil diff --git a/internal/ingress/annotations/portinredirect/main_test.go b/internal/ingress/annotations/portinredirect/main_test.go index c4e0e9179e..7087ddcd3f 100644 --- a/internal/ingress/annotations/portinredirect/main_test.go +++ b/internal/ingress/annotations/portinredirect/main_test.go @@ -21,7 +21,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -30,28 +30,28 @@ import ( "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, diff --git a/internal/ingress/annotations/proxy/main.go b/internal/ingress/annotations/proxy/main.go index 8171bdbbb6..aafc9d5d85
100644 --- a/internal/ingress/annotations/proxy/main.go +++ b/internal/ingress/annotations/proxy/main.go @@ -17,7 +17,7 @@ limitations under the License. package proxy import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -25,18 +25,21 @@ import ( // Config returns the proxy timeout to use in the upstream server/s type Config struct { - BodySize string `json:"bodySize"` - ConnectTimeout int `json:"connectTimeout"` - SendTimeout int `json:"sendTimeout"` - ReadTimeout int `json:"readTimeout"` - BufferSize string `json:"bufferSize"` - CookieDomain string `json:"cookieDomain"` - CookiePath string `json:"cookiePath"` - NextUpstream string `json:"nextUpstream"` - PassParams string `json:"passParams"` - ProxyRedirectFrom string `json:"proxyRedirectFrom"` - ProxyRedirectTo string `json:"proxyRedirectTo"` - RequestBuffering string `json:"requestBuffering"` + BodySize string `json:"bodySize"` + ConnectTimeout int `json:"connectTimeout"` + SendTimeout int `json:"sendTimeout"` + ReadTimeout int `json:"readTimeout"` + BuffersNumber int `json:"buffersNumber"` + BufferSize string `json:"bufferSize"` + CookieDomain string `json:"cookieDomain"` + CookiePath string `json:"cookiePath"` + NextUpstream string `json:"nextUpstream"` + NextUpstreamTimeout int `json:"nextUpstreamTimeout"` + NextUpstreamTries int `json:"nextUpstreamTries"` + ProxyRedirectFrom string `json:"proxyRedirectFrom"` + ProxyRedirectTo string `json:"proxyRedirectTo"` + RequestBuffering string `json:"requestBuffering"` + ProxyBuffering string `json:"proxyBuffering"` } // Equal tests for equality between two Configuration types @@ -59,6 +62,9 @@ func (l1 *Config) Equal(l2 *Config) bool { if l1.ReadTimeout != l2.ReadTimeout { return false } + if l1.BuffersNumber != l2.BuffersNumber { + return false + } if l1.BufferSize != l2.BufferSize { return false } @@ -71,7 +77,10 @@ func (l1 *Config) Equal(l2 *Config) bool { if l1.NextUpstream != l2.NextUpstream { return false } - if l1.PassParams != l2.PassParams { + if l1.NextUpstreamTimeout != l2.NextUpstreamTimeout { + return false + } + if l1.NextUpstreamTries != l2.NextUpstreamTries { return false } if l1.RequestBuffering != l2.RequestBuffering { @@ -83,6 +92,9 @@ func (l1 *Config) Equal(l2 *Config) bool { if l1.ProxyRedirectTo != l2.ProxyRedirectTo { return false } + if l1.ProxyBuffering != l2.ProxyBuffering { + return false + } return true } @@ -98,67 +110,86 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // ParseAnnotations parses the annotations contained in the ingress // rule used to configure upstream check parameters -func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a proxy) Parse(ing *networking.Ingress) (interface{}, error) { defBackend := a.r.GetDefaultBackend() - ct, err := parser.GetIntAnnotation("proxy-connect-timeout", ing) + config := &Config{} + + var err error + + config.ConnectTimeout, err = parser.GetIntAnnotation("proxy-connect-timeout", ing) + if err != nil { + config.ConnectTimeout = defBackend.ProxyConnectTimeout + } + + config.SendTimeout, err = parser.GetIntAnnotation("proxy-send-timeout", ing) + if err != nil { + config.SendTimeout = defBackend.ProxySendTimeout + } + + config.ReadTimeout, err = parser.GetIntAnnotation("proxy-read-timeout", ing) if err != nil { - ct = defBackend.ProxyConnectTimeout + config.ReadTimeout = defBackend.ProxyReadTimeout } - st, err := 
parser.GetIntAnnotation("proxy-send-timeout", ing) + config.BuffersNumber, err = parser.GetIntAnnotation("proxy-buffers-number", ing) if err != nil { - st = defBackend.ProxySendTimeout + config.BuffersNumber = defBackend.ProxyBuffersNumber } - rt, err := parser.GetIntAnnotation("proxy-read-timeout", ing) + config.BufferSize, err = parser.GetStringAnnotation("proxy-buffer-size", ing) if err != nil { - rt = defBackend.ProxyReadTimeout + config.BufferSize = defBackend.ProxyBufferSize } - bufs, err := parser.GetStringAnnotation("proxy-buffer-size", ing) - if err != nil || bufs == "" { - bufs = defBackend.ProxyBufferSize + config.CookiePath, err = parser.GetStringAnnotation("proxy-cookie-path", ing) + if err != nil { + config.CookiePath = defBackend.ProxyCookiePath } - cp, err := parser.GetStringAnnotation("proxy-cookie-path", ing) - if err != nil || cp == "" { - cp = defBackend.ProxyCookiePath + config.CookieDomain, err = parser.GetStringAnnotation("proxy-cookie-domain", ing) + if err != nil { + config.CookieDomain = defBackend.ProxyCookieDomain } - cd, err := parser.GetStringAnnotation("proxy-cookie-domain", ing) - if err != nil || cd == "" { - cd = defBackend.ProxyCookieDomain + config.BodySize, err = parser.GetStringAnnotation("proxy-body-size", ing) + if err != nil { + config.BodySize = defBackend.ProxyBodySize } - bs, err := parser.GetStringAnnotation("proxy-body-size", ing) - if err != nil || bs == "" { - bs = defBackend.ProxyBodySize + config.NextUpstream, err = parser.GetStringAnnotation("proxy-next-upstream", ing) + if err != nil { + config.NextUpstream = defBackend.ProxyNextUpstream } - nu, err := parser.GetStringAnnotation("proxy-next-upstream", ing) - if err != nil || nu == "" { - nu = defBackend.ProxyNextUpstream + config.NextUpstreamTimeout, err = parser.GetIntAnnotation("proxy-next-upstream-timeout", ing) + if err != nil { + config.NextUpstreamTimeout = defBackend.ProxyNextUpstreamTimeout } - pp, err := parser.GetStringAnnotation("proxy-pass-params", ing) - if err != nil || pp == "" { - pp = defBackend.ProxyPassParams + config.NextUpstreamTries, err = parser.GetIntAnnotation("proxy-next-upstream-tries", ing) + if err != nil { + config.NextUpstreamTries = defBackend.ProxyNextUpstreamTries } - rb, err := parser.GetStringAnnotation("proxy-request-buffering", ing) - if err != nil || rb == "" { - rb = defBackend.ProxyRequestBuffering + config.RequestBuffering, err = parser.GetStringAnnotation("proxy-request-buffering", ing) + if err != nil { + config.RequestBuffering = defBackend.ProxyRequestBuffering + } + + config.ProxyRedirectFrom, err = parser.GetStringAnnotation("proxy-redirect-from", ing) + if err != nil { + config.ProxyRedirectFrom = defBackend.ProxyRedirectFrom } - prf, err := parser.GetStringAnnotation("proxy-redirect-from", ing) - if err != nil || rb == "" { - prf = defBackend.ProxyRedirectFrom + config.ProxyRedirectTo, err = parser.GetStringAnnotation("proxy-redirect-to", ing) + if err != nil { + config.ProxyRedirectTo = defBackend.ProxyRedirectTo } - prt, err := parser.GetStringAnnotation("proxy-redirect-to", ing) - if err != nil || rb == "" { - prt = defBackend.ProxyRedirectTo + config.ProxyBuffering, err = parser.GetStringAnnotation("proxy-buffering", ing) + if err != nil { + config.ProxyBuffering = defBackend.ProxyBuffering } - return &Config{bs, ct, st, rt, bufs, cd, cp, nu, pp, prf, prt, rb}, nil + return config, nil } diff --git a/internal/ingress/annotations/proxy/main_test.go b/internal/ingress/annotations/proxy/main_test.go index 558d140f33..8520312e70 100644 --- 
a/internal/ingress/annotations/proxy/main_test.go +++ b/internal/ingress/annotations/proxy/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -29,28 +29,28 @@ import ( "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -70,15 +70,17 @@ type mockBackend struct { func (m mockBackend) GetDefaultBackend() defaults.Backend { return defaults.Backend{ - UpstreamFailTimeout: 1, - ProxyConnectTimeout: 10, - ProxySendTimeout: 15, - ProxyReadTimeout: 20, - ProxyBufferSize: "10k", - ProxyBodySize: "3k", - ProxyNextUpstream: "error", - ProxyPassParams: "nocanon keepalive=On", - ProxyRequestBuffering: "on", + ProxyConnectTimeout: 10, + ProxySendTimeout: 15, + ProxyReadTimeout: 20, + ProxyBuffersNumber: 4, + ProxyBufferSize: "10k", + ProxyBodySize: "3k", + ProxyNextUpstream: "error", + ProxyNextUpstreamTimeout: 0, + ProxyNextUpstreamTries: 3, + ProxyRequestBuffering: "on", + ProxyBuffering: "off", } } @@ -89,11 +91,14 @@ func TestProxy(t *testing.T) { data[parser.GetAnnotationWithPrefix("proxy-connect-timeout")] = "1" data[parser.GetAnnotationWithPrefix("proxy-send-timeout")] = "2" data[parser.GetAnnotationWithPrefix("proxy-read-timeout")] = "3" + data[parser.GetAnnotationWithPrefix("proxy-buffers-number")] = "8" data[parser.GetAnnotationWithPrefix("proxy-buffer-size")] = "1k" data[parser.GetAnnotationWithPrefix("proxy-body-size")] = "2k" data[parser.GetAnnotationWithPrefix("proxy-next-upstream")] = "off" - data[parser.GetAnnotationWithPrefix("proxy-pass-params")] = "smax=5 max=10" + data[parser.GetAnnotationWithPrefix("proxy-next-upstream-timeout")] = "5" + data[parser.GetAnnotationWithPrefix("proxy-next-upstream-tries")] = "3" data[parser.GetAnnotationWithPrefix("proxy-request-buffering")] = "off" + data[parser.GetAnnotationWithPrefix("proxy-buffering")] = "on" ing.SetAnnotations(data) i, err := NewParser(mockBackend{}).Parse(ing) @@ -113,6 +118,9 @@ func TestProxy(t *testing.T) { if p.ReadTimeout != 3 { t.Errorf("expected 3 as read-timeout but returned %v", p.ReadTimeout) } + if p.BuffersNumber != 8 { + t.Errorf("expected 8 as proxy-buffers-number but returned %v", p.BuffersNumber) + } if p.BufferSize != "1k" { t.Errorf("expected 1k as buffer-size but returned %v", p.BufferSize) } @@ -122,12 +130,18 @@ func TestProxy(t *testing.T) { if p.NextUpstream != "off" { t.Errorf("expected off as next-upstream but returned %v", 
p.NextUpstream) } - if p.PassParams != "smax=5 max=10" { - t.Errorf("expected \"smax=5 max=10\" as pass-params but returned \"%v\"", p.PassParams) + if p.NextUpstreamTimeout != 5 { + t.Errorf("expected 5 as next-upstream-timeout but returned %v", p.NextUpstreamTimeout) + } + if p.NextUpstreamTries != 3 { + t.Errorf("expected 3 as next-upstream-tries but returned %v", p.NextUpstreamTries) } if p.RequestBuffering != "off" { t.Errorf("expected off as request-buffering but returned %v", p.RequestBuffering) } + if p.ProxyBuffering != "on" { + t.Errorf("expected on as proxy-buffering but returned %v", p.ProxyBuffering) + } } func TestProxyWithNoAnnotation(t *testing.T) { @@ -153,6 +167,9 @@ func TestProxyWithNoAnnotation(t *testing.T) { if p.ReadTimeout != 20 { t.Errorf("expected 20 as read-timeout but returned %v", p.ReadTimeout) } + if p.BuffersNumber != 4 { + t.Errorf("expected 4 as buffer-number but returned %v", p.BuffersNumber) + } if p.BufferSize != "10k" { t.Errorf("expected 10k as buffer-size but returned %v", p.BufferSize) } @@ -162,8 +179,11 @@ func TestProxyWithNoAnnotation(t *testing.T) { if p.NextUpstream != "error" { t.Errorf("expected error as next-upstream but returned %v", p.NextUpstream) } - if p.PassParams != "nocanon keepalive=On" { - t.Errorf("expected \"nocanon keepalive=On\" as pass-params but returned \"%v\"", p.PassParams) + if p.NextUpstreamTimeout != 0 { + t.Errorf("expected 0 as next-upstream-timeout but returned %v", p.NextUpstreamTimeout) + } + if p.NextUpstreamTries != 3 { + t.Errorf("expected 3 as next-upstream-tries but returned %v", p.NextUpstreamTries) } if p.RequestBuffering != "on" { t.Errorf("expected on as request-buffering but returned %v", p.RequestBuffering) diff --git a/internal/ingress/annotations/ratelimit/main.go b/internal/ingress/annotations/ratelimit/main.go index 44e1a46c9c..b3a12abaf1 100644 --- a/internal/ingress/annotations/ratelimit/main.go +++ b/internal/ingress/annotations/ratelimit/main.go @@ -22,11 +22,12 @@ import ( "sort" "strings" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" "k8s.io/ingress-nginx/internal/net" + "k8s.io/ingress-nginx/internal/sets" ) const ( @@ -94,17 +95,9 @@ func (rt1 *Config) Equal(rt2 *Config) bool { return false } - for _, r1l := range rt1.Whitelist { - found := false - for _, rl2 := range rt2.Whitelist { - if r1l == rl2 { - found = true - break - } - } - if !found { - return false - } + match := sets.StringElementsMatch(rt1.Whitelist, rt2.Whitelist) + if !match { + return false } return true @@ -155,7 +148,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // ParseAnnotations parses the annotations contained in the ingress // rule used to rate limit the defined paths -func (a ratelimit) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a ratelimit) Parse(ing *networking.Ingress) (interface{}, error) { defBackend := a.r.GetDefaultBackend() lr, err := parser.GetIntAnnotation("limit-rate", ing) if err != nil { diff --git a/internal/ingress/annotations/ratelimit/main_test.go b/internal/ingress/annotations/ratelimit/main_test.go index 06ced468b8..3878d3a0a1 100644 --- a/internal/ingress/annotations/ratelimit/main_test.go +++ b/internal/ingress/annotations/ratelimit/main_test.go @@ -17,10 +17,12 @@ limitations under the License.
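The two proxy tests above exercise the refactor's single rule: each annotation that fails to parse falls back to its default-backend value, so overriding one annotation leaves every other field at its default. A sketch of a partial-override case against the same mockBackend and buildIngress defined in that test file; the test name is hypothetical:

func TestProxyPartialOverrideSketch(t *testing.T) {
	ing := buildIngress()
	// Override a single annotation; all other fields should fall back
	// to the defaults served by mockBackend above.
	ing.SetAnnotations(map[string]string{
		parser.GetAnnotationWithPrefix("proxy-read-timeout"): "60",
	})

	i, _ := NewParser(mockBackend{}).Parse(ing)
	p := i.(*Config)
	if p.ReadTimeout != 60 {
		t.Errorf("expected 60 as read-timeout but returned %v", p.ReadTimeout)
	}
	if p.ConnectTimeout != 10 || p.BufferSize != "10k" || p.ProxyBuffering != "off" {
		t.Errorf("expected mockBackend defaults for the remaining fields, got %+v", p)
	}
}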
package ratelimit import ( + "reflect" + "sort" "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -29,28 +31,28 @@ import ( "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -83,7 +85,24 @@ func TestWithoutAnnotations(t *testing.T) { } } -func TestBadRateLimiting(t *testing.T) { +func TestParseCIDRs(t *testing.T) { + cidr, _ := parseCIDRs("invalid.com") + if cidr != nil { + t.Errorf("expected %v but got %v", nil, cidr) + } + + expected := []string{"192.0.0.1", "192.0.1.0/24"} + cidr, err := parseCIDRs("192.0.0.1, 192.0.1.0/24") + if err != nil { + t.Errorf("unexpected error %v", err) + } + sort.Strings(cidr) + if !reflect.DeepEqual(expected, cidr) { + t.Errorf("expected %v but got %v", expected, cidr) + } +} + +func TestRateLimiting(t *testing.T) { ing := buildIngress() data := map[string]string{} diff --git a/internal/ingress/annotations/redirect/redirect.go b/internal/ingress/annotations/redirect/redirect.go index d94ede184a..02ee1d522c 100644 --- a/internal/ingress/annotations/redirect/redirect.go +++ b/internal/ingress/annotations/redirect/redirect.go @@ -21,13 +21,15 @@ import ( "net/url" "strings" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" ) +const defaultPermanentRedirectCode = http.StatusMovedPermanently + // Config returns the redirect configuration for an Ingress rule type Config struct { URL string `json:"url"` @@ -48,7 +50,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // rule used to create a redirect in the paths defined in the rule. 
// If the Ingress contains both annotations the execution order is // temporal and then permanent -func (a redirect) Parse(ing *extensions.Ingress) (interface{}, error) { +func (r redirect) Parse(ing *networking.Ingress) (interface{}, error) { r3w, _ := parser.GetBoolAnnotation("from-to-www-redirect", ing) tr, err := parser.GetStringAnnotation("temporal-redirect", ing) @@ -73,20 +75,19 @@ func (a redirect) Parse(ing *extensions.Ingress) (interface{}, error) { return nil, err } - if pr != "" { - if err := isValidURL(pr); err != nil { - return nil, err - } + prc, err := parser.GetIntAnnotation("permanent-redirect-code", ing) + if err != nil && !errors.IsMissingAnnotations(err) { + return nil, err + } - return &Config{ - URL: pr, - Code: http.StatusMovedPermanently, - FromToWWW: r3w, - }, nil + if prc < http.StatusMultipleChoices || prc > http.StatusPermanentRedirect { + prc = defaultPermanentRedirectCode } - if r3w { + if pr != "" || r3w { return &Config{ + URL: pr, + Code: prc, FromToWWW: r3w, }, nil } diff --git a/internal/ingress/annotations/redirect/redirect_test.go b/internal/ingress/annotations/redirect/redirect_test.go new file mode 100644 index 0000000000..b9bda6688f --- /dev/null +++ b/internal/ingress/annotations/redirect/redirect_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package redirect + +import ( + "net/http" + "net/url" + "reflect" + "strconv" + "testing" + + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +const ( + defRedirectURL = "http://some-site.com" +) + +func TestPermanentRedirectWithDefaultCode(t *testing.T) { + rp := NewParser(resolver.Mock{}) + if rp == nil { + t.Fatalf("Expected a parser.IngressAnnotation but returned nil") + } + + ing := new(networking.Ingress) + + data := make(map[string]string, 1) + data[parser.GetAnnotationWithPrefix("permanent-redirect")] = defRedirectURL + ing.SetAnnotations(data) + + i, err := rp.Parse(ing) + if err != nil { + t.Errorf("Unexpected error with ingress: %v", err) + } + redirect, ok := i.(*Config) + if !ok { + t.Errorf("Expected a Redirect type") + } + if redirect.URL != defRedirectURL { + t.Errorf("Expected %v as redirect but returned %s", defRedirectURL, redirect.URL) + } + if redirect.Code != defaultPermanentRedirectCode { + t.Errorf("Expected %v as redirect to have a code %d but had %d", defRedirectURL, defaultPermanentRedirectCode, redirect.Code) + } +} + +func TestPermanentRedirectWithCustomCode(t *testing.T) { + rp := NewParser(resolver.Mock{}) + if rp == nil { + t.Fatalf("Expected a parser.IngressAnnotation but returned nil") + } + + testCases := map[string]struct { + input int + expectOutput int + }{ + "valid code": {http.StatusPermanentRedirect, http.StatusPermanentRedirect}, + "invalid code": {http.StatusTeapot, defaultPermanentRedirectCode}, + } + + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + ing := new(networking.Ingress) + + data := make(map[string]string, 2) + data[parser.GetAnnotationWithPrefix("permanent-redirect")] = defRedirectURL + data[parser.GetAnnotationWithPrefix("permanent-redirect-code")] = strconv.Itoa(tc.input) + ing.SetAnnotations(data) + + i, err := rp.Parse(ing) + if err != nil { + t.Errorf("Unexpected error with ingress: %v", err) + } + redirect, ok := i.(*Config) + if !ok { + t.Errorf("Expected a redirect Config type") + } + if redirect.URL != defRedirectURL { + t.Errorf("Expected %v as redirect but returned %s", defRedirectURL, redirect.URL) + } + if redirect.Code != tc.expectOutput { + t.Errorf("Expected %v as redirect to have a code %d but had %d", defRedirectURL, tc.expectOutput, redirect.Code) + } + }) + } +} + +func TestTemporalRedirect(t *testing.T) { + rp := NewParser(resolver.Mock{}) + if rp == nil { + t.Fatalf("Expected a parser.IngressAnnotation but returned nil") + } + + ing := new(networking.Ingress) + + data := make(map[string]string, 2) + data[parser.GetAnnotationWithPrefix("from-to-www-redirect")] = "true" + data[parser.GetAnnotationWithPrefix("temporal-redirect")] = defRedirectURL + ing.SetAnnotations(data) + + i, err := rp.Parse(ing) + if err != nil { + t.Errorf("Unexpected error with ingress: %v", err) + } + redirect, ok := i.(*Config) + if !ok { + t.Errorf("Expected a Redirect type") + } + if redirect.URL != defRedirectURL { + t.Errorf("Expected %v as redirect but returned %s", defRedirectURL, redirect.URL) + } + if redirect.Code != http.StatusFound { + t.Errorf("Expected %v as redirect to have a code %d but had %d", defRedirectURL, http.StatusFound, redirect.Code) + } + if redirect.FromToWWW != true { + t.Errorf("Expected %v as redirect to have from-to-www as %v but got %v", defRedirectURL, true, redirect.FromToWWW) + } +} + +func TestIsValidURL(t
*testing.T) { + + invalid := "ok.com" + urlParse, err := url.Parse(invalid) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + expected := errors.Errorf("only http and https are valid protocols (%v)", urlParse.Scheme) + err = isValidURL(invalid) + if !reflect.DeepEqual(expected.Error(), err.Error()) { + t.Errorf("expected '%v' but got '%v'", expected, err) + } + + valid := "http://ok.com" + err = isValidURL(valid) + if err != nil { + t.Errorf("expected nil but got %v", err) + } +} diff --git a/internal/ingress/annotations/rewrite/main.go b/internal/ingress/annotations/rewrite/main.go index 2f222a5099..82d1ff2828 100644 --- a/internal/ingress/annotations/rewrite/main.go +++ b/internal/ingress/annotations/rewrite/main.go @@ -17,7 +17,7 @@ limitations under the License. package rewrite import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -27,17 +27,14 @@ import ( type Config struct { // Target URI where the traffic must be redirected Target string `json:"target"` - // AddBaseURL indicates if is required to add a base tag in the head - // of the responses from the upstream servers - AddBaseURL bool `json:"addBaseUrl"` - // BaseURLScheme override for the scheme passed to the base tag - BaseURLScheme string `json:"baseUrlScheme"` // SSLRedirect indicates if the location section is accessible SSL only SSLRedirect bool `json:"sslRedirect"` // ForceSSLRedirect indicates if the location section is accessible SSL only ForceSSLRedirect bool `json:"forceSSLRedirect"` - // AppRoot defines the Application Root that the Controller must redirect if it's not in '/' context + // AppRoot defines the Application Root that the Controller must redirect if it's in '/' context AppRoot string `json:"appRoot"` + // UseRegex indicates whether or not the locations use regex paths + UseRegex bool `json:"useRegex"` } // Equal tests for equality between two Redirect types @@ -51,12 +48,6 @@ func (r1 *Config) Equal(r2 *Config) bool { if r1.Target != r2.Target { return false } - if r1.AddBaseURL != r2.AddBaseURL { - return false - } - if r1.BaseURLScheme != r2.BaseURLScheme { - return false - } if r1.SSLRedirect != r2.SSLRedirect { return false } @@ -66,6 +57,9 @@ func (r1 *Config) Equal(r2 *Config) bool { if r1.AppRoot != r2.AppRoot { return false } + if r1.UseRegex != r2.UseRegex { + return false + } return true } @@ -74,33 +68,30 @@ type rewrite struct { r resolver.Resolver } -// NewParser creates a new reqrite annotation parser +// NewParser creates a new rewrite annotation parser func NewParser(r resolver.Resolver) parser.IngressAnnotation { return rewrite{r} } // ParseAnnotations parses the annotations contained in the ingress // rule used to rewrite the defined paths -func (a rewrite) Parse(ing *extensions.Ingress) (interface{}, error) { - rt, _ := parser.GetStringAnnotation("rewrite-target", ing) - sslRe, err := parser.GetBoolAnnotation("ssl-redirect", ing) +func (a rewrite) Parse(ing *networking.Ingress) (interface{}, error) { + var err error + config := &Config{} + + config.Target, _ = parser.GetStringAnnotation("rewrite-target", ing) + config.SSLRedirect, err = parser.GetBoolAnnotation("ssl-redirect", ing) if err != nil { - sslRe = a.r.GetDefaultBackend().SSLRedirect + config.SSLRedirect = a.r.GetDefaultBackend().SSLRedirect } - fSslRe, err := parser.GetBoolAnnotation("force-ssl-redirect", ing) + + config.ForceSSLRedirect, err =
parser.GetBoolAnnotation("force-ssl-redirect", ing) if err != nil { - fSslRe = a.r.GetDefaultBackend().ForceSSLRedirect + config.ForceSSLRedirect = a.r.GetDefaultBackend().ForceSSLRedirect } - abu, _ := parser.GetBoolAnnotation("add-base-url", ing) - bus, _ := parser.GetStringAnnotation("base-url-scheme", ing) - ar, _ := parser.GetStringAnnotation("app-root", ing) - - return &Config{ - Target: rt, - AddBaseURL: abu, - BaseURLScheme: bus, - SSLRedirect: sslRe, - ForceSSLRedirect: fSslRe, - AppRoot: ar, - }, nil + + config.AppRoot, _ = parser.GetStringAnnotation("app-root", ing) + config.UseRegex, _ = parser.GetBoolAnnotation("use-regex", ing) + + return config, nil } diff --git a/internal/ingress/annotations/rewrite/main_test.go b/internal/ingress/annotations/rewrite/main_test.go index b54e3bfdc1..03afaa3c01 100644 --- a/internal/ingress/annotations/rewrite/main_test.go +++ b/internal/ingress/annotations/rewrite/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -33,28 +33,28 @@ const ( defRoute = "/demo" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -178,3 +178,20 @@ func TestAppRoot(t *testing.T) { t.Errorf("Unexpected value got in AppRoot") } } + +func TestUseRegex(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("use-regex")] = "true" + ing.SetAnnotations(data) + + i, _ := NewParser(mockBackend{redirect: true}).Parse(ing) + redirect, ok := i.(*Config) + if !ok { + t.Errorf("expected a App Context") + } + if !redirect.UseRegex { + t.Errorf("Unexpected value got in UseRegex") + } +} diff --git a/internal/ingress/annotations/satisfy/main.go b/internal/ingress/annotations/satisfy/main.go new file mode 100644 index 0000000000..a064bdf96f --- /dev/null +++ b/internal/ingress/annotations/satisfy/main.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package satisfy + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type satisfy struct { + r resolver.Resolver +} + +// NewParser creates a new SATISFY annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return satisfy{r} +} + +// Parse parses annotation contained in the ingress +func (s satisfy) Parse(ing *networking.Ingress) (interface{}, error) { + satisfy, err := parser.GetStringAnnotation("satisfy", ing) + + if err != nil || (satisfy != "any" && satisfy != "all") { + satisfy = "" + } + + return satisfy, nil +} diff --git a/internal/ingress/annotations/satisfy/main_test.go b/internal/ingress/annotations/satisfy/main_test.go new file mode 100644 index 0000000000..a3475316ae --- /dev/null +++ b/internal/ingress/annotations/satisfy/main_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package satisfy + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "fake", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []networking.IngressRule{ + { + Host: "fake.host.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/fake", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestSatisfyParser(t *testing.T) { + ing := buildIngress() + + data := map[string]string{ + "any": "any", + "all": "all", + "invalid": "", + "": "", + } + + annotations := map[string]string{} + + for input, expected := range data { + annotations[parser.GetAnnotationWithPrefix("satisfy")] = input + ing.SetAnnotations(annotations) + + satisfyt, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("error parsing annotations: %v", err) + } + + val, ok := satisfyt.(string) + if !ok { + t.Errorf("expected a string type but return %t", satisfyt) + } + + if val != expected { + t.Errorf("expected %v but returned %v", expected, val) + } + } +} diff --git a/internal/ingress/annotations/secureupstream/main.go b/internal/ingress/annotations/secureupstream/main.go index e973dacbc0..7efb0b9a14 100644 --- a/internal/ingress/annotations/secureupstream/main.go +++ b/internal/ingress/annotations/secureupstream/main.go @@ -20,7 +20,7 @@ import ( "fmt" 
"github.com/pkg/errors" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -28,7 +28,6 @@ import ( // Config describes SSL backend configuration type Config struct { - Secure bool `json:"secure"` CACert resolver.AuthSSLCert `json:"caCert"` } @@ -43,14 +42,14 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress // rule used to indicate if the upstream servers should use SSL -func (a su) Parse(ing *extensions.Ingress) (interface{}, error) { - s, _ := parser.GetBoolAnnotation("secure-backends", ing) +func (a su) Parse(ing *networking.Ingress) (interface{}, error) { + bp, _ := parser.GetStringAnnotation("backend-protocol", ing) ca, _ := parser.GetStringAnnotation("secure-verify-ca-secret", ing) secure := &Config{ - Secure: s, CACert: resolver.AuthSSLCert{}, } - if !s && ca != "" { + + if (bp != "HTTPS" && bp != "GRPCS") && ca != "" { return secure, errors.Errorf("trying to use CA from secret %v/%v on a non secure backend", ing.Namespace, ca) } @@ -65,7 +64,6 @@ func (a su) Parse(ing *extensions.Ingress) (interface{}, error) { return secure, nil } return &Config{ - Secure: s, CACert: *caCert, }, nil } diff --git a/internal/ingress/annotations/secureupstream/main_test.go b/internal/ingress/annotations/secureupstream/main_test.go index 13a7c4e2c1..a2028acbb5 100644 --- a/internal/ingress/annotations/secureupstream/main_test.go +++ b/internal/ingress/annotations/secureupstream/main_test.go @@ -21,7 +21,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -29,28 +29,28 @@ import ( "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, @@ -76,10 +76,26 @@ func (cfg mockCfg) GetAuthCertificate(secret string) (*resolver.AuthSSLCert, err return nil, fmt.Errorf("secret not found: %v", secret) } +func TestNoCA(t *testing.T) { + ing := buildIngress() + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS" + ing.SetAnnotations(data) + + _, err := NewParser(mockCfg{ + certs: map[string]resolver.AuthSSLCert{ + "default/secure-verify-ca": {}, + }, + }).Parse(ing) + if err != nil { + t.Errorf("Unexpected error on ingress: %v", err) + } +} + func TestAnnotations(t *testing.T) { ing := buildIngress() data 
:= map[string]string{} - data[parser.GetAnnotationWithPrefix("secure-backends")] = "true" + data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS" data[parser.GetAnnotationWithPrefix("secure-verify-ca-secret")] = "secure-verify-ca" ing.SetAnnotations(data) @@ -96,7 +112,7 @@ func TestAnnotations(t *testing.T) { func TestSecretNotFound(t *testing.T) { ing := buildIngress() data := map[string]string{} - data[parser.GetAnnotationWithPrefix("secure-backends")] = "true" + data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS" data[parser.GetAnnotationWithPrefix("secure-verify-ca-secret")] = "secure-verify-ca" ing.SetAnnotations(data) _, err := NewParser(mockCfg{}).Parse(ing) @@ -108,7 +124,7 @@ func TestSecretNotFound(t *testing.T) { func TestSecretOnNonSecure(t *testing.T) { ing := buildIngress() data := map[string]string{} - data[parser.GetAnnotationWithPrefix("secure-backends")] = "false" + data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTP" data[parser.GetAnnotationWithPrefix("secure-verify-ca-secret")] = "secure-verify-ca" ing.SetAnnotations(data) _, err := NewParser(mockCfg{ diff --git a/internal/ingress/annotations/serversnippet/main.go b/internal/ingress/annotations/serversnippet/main.go index 82bed533f2..33a672650f 100644 --- a/internal/ingress/annotations/serversnippet/main.go +++ b/internal/ingress/annotations/serversnippet/main.go @@ -17,7 +17,7 @@ limitations under the License. package serversnippet import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -35,6 +35,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress rule // used to indicate if the location/s contains a fragment of // configuration to be included inside the paths of the rules -func (a serverSnippet) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a serverSnippet) Parse(ing *networking.Ingress) (interface{}, error) { return parser.GetStringAnnotation("server-snippet", ing) } diff --git a/internal/ingress/annotations/serversnippet/main_test.go b/internal/ingress/annotations/serversnippet/main_test.go index 1e18228f01..066334f69b 100644 --- a/internal/ingress/annotations/serversnippet/main_test.go +++ b/internal/ingress/annotations/serversnippet/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -44,12 +44,12 @@ func TestParse(t *testing.T) { {nil, ""}, } - ing := &extensions.Ingress{ + ing := &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{}, + Spec: networking.IngressSpec{}, } for _, testCase := range testCases { diff --git a/internal/ingress/annotations/serviceupstream/main.go b/internal/ingress/annotations/serviceupstream/main.go index 9f9a41a354..ff90f8160d 100644 --- a/internal/ingress/annotations/serviceupstream/main.go +++ b/internal/ingress/annotations/serviceupstream/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package serviceupstream import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -32,6 +32,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { return serviceUpstream{r} } -func (s serviceUpstream) Parse(ing *extensions.Ingress) (interface{}, error) { +func (s serviceUpstream) Parse(ing *networking.Ingress) (interface{}, error) { return parser.GetBoolAnnotation("service-upstream", ing) } diff --git a/internal/ingress/annotations/serviceupstream/main_test.go b/internal/ingress/annotations/serviceupstream/main_test.go index 6c2bcc2b4b..c7f44598e6 100644 --- a/internal/ingress/annotations/serviceupstream/main_test.go +++ b/internal/ingress/annotations/serviceupstream/main_test.go @@ -20,35 +20,35 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" ) -func buildIngress() *extensions.Ingress { - defaultBackend := extensions.IngressBackend{ +func buildIngress() *networking.Ingress { + defaultBackend := networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), } - return &extensions.Ingress{ + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, - Rules: []extensions.IngressRule{ + Rules: []networking.IngressRule{ { Host: "foo.bar.com", - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{ + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ { Path: "/foo", Backend: defaultBackend, diff --git a/internal/ingress/annotations/sessionaffinity/main.go b/internal/ingress/annotations/sessionaffinity/main.go index 49de25d33d..417f625b0a 100644 --- a/internal/ingress/annotations/sessionaffinity/main.go +++ b/internal/ingress/annotations/sessionaffinity/main.go @@ -19,9 +19,8 @@ package sessionaffinity import ( "regexp" - "github.com/golang/glog" - - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -35,14 +34,23 @@ const ( defaultAffinityCookieName = "INGRESSCOOKIE" - // This is the algorithm used by nginx to generate a value for the session cookie, if - // one isn't supplied and affinity is set to "cookie". 
-	annotationAffinityCookieHash = "session-cookie-hash"
-	defaultAffinityCookieHash    = "md5"
+	// This is used to control when the cookie expires; its value is a number of
+	// seconds until the cookie expires
+	annotationAffinityCookieExpires = "session-cookie-expires"
+
+	// This is used to control the cookie Max-Age attribute; its value is a number
+	// of seconds until the cookie expires
+	annotationAffinityCookieMaxAge = "session-cookie-max-age"
+
+	// This is used to control the cookie path when use-regex is set to true
+	annotationAffinityCookiePath = "session-cookie-path"
+
+	// This is used to control whether the cookie is changed after a request failure
+	annotationAffinityCookieChangeOnFailure = "session-cookie-change-on-failure"
 )
 
 var (
-	affinityCookieHashRegex = regexp.MustCompile(`^(index|md5|sha1)$`)
+	affinityCookieExpiresRegex = regexp.MustCompile(`^(0|-?[1-9]\d*)$`)
 )
 
 // Config describes the per ingress session affinity config
@@ -56,31 +64,53 @@ type Config struct {
 type Cookie struct {
 	// The name of the cookie that will be used in case of cookie affinity type.
 	Name string `json:"name"`
-	// The hash that will be used to encode the cookie in case of cookie affinity type
-	Hash string `json:"hash"`
+	// The time duration to control cookie expires
+	Expires string `json:"expires"`
+	// The number of seconds until the cookie expires
+	MaxAge string `json:"maxage"`
+	// The path that a cookie will be set on
+	Path string `json:"path"`
+	// Flag that allows cookie regeneration on request failure
+	ChangeOnFailure bool `json:"changeonfailure"`
 }
 
 // cookieAffinityParse gets the annotation values related to Cookie Affinity
 // It also sets default values when no value or incorrect value is found
-func (a affinity) cookieAffinityParse(ing *extensions.Ingress) *Cookie {
-	sn, err := parser.GetStringAnnotation(annotationAffinityCookieName, ing)
+func (a affinity) cookieAffinityParse(ing *networking.Ingress) *Cookie {
+	var err error
+
+	cookie := &Cookie{}
+
+	cookie.Name, err = parser.GetStringAnnotation(annotationAffinityCookieName, ing)
+	if err != nil {
+		klog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName)
+		cookie.Name = defaultAffinityCookieName
+	}
 
-	if err != nil || sn == "" {
-		glog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName)
-		sn = defaultAffinityCookieName
+	cookie.Expires, err = parser.GetStringAnnotation(annotationAffinityCookieExpires, ing)
+	if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.Expires) {
+		klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieExpires)
+		cookie.Expires = ""
 	}
 
-	sh, err := parser.GetStringAnnotation(annotationAffinityCookieHash, ing)
+	cookie.MaxAge, err = parser.GetStringAnnotation(annotationAffinityCookieMaxAge, ing)
+	if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.MaxAge) {
+		klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge)
+		cookie.MaxAge = ""
+	}
 
-	if err != nil || !affinityCookieHashRegex.MatchString(sh) {
-		glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash)
-		sh = defaultAffinityCookieHash
+	cookie.Path, err = parser.GetStringAnnotation(annotationAffinityCookiePath, ing)
+	if err != nil {
+		klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookiePath)
 	}
 
-	return &Cookie{
-		Name: sn,
-		Hash: sh,
+	cookie.ChangeOnFailure, err = parser.GetBoolAnnotation(annotationAffinityCookieChangeOnFailure, ing)
+	if err != nil {
+		klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieChangeOnFailure)
 	}
+
+	return cookie
 }
 
 // NewParser creates a new Affinity annotation parser
@@ -94,7 +128,7 @@ type affinity struct {
 
 // ParseAnnotations parses the annotations contained in the ingress
 // rule used to configure the affinity directives
-func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) {
+func (a affinity) Parse(ing *networking.Ingress) (interface{}, error) {
 	cookie := &Cookie{}
 	// Check the type of affinity that will be used
 	at, err := parser.GetStringAnnotation(annotationAffinityType, ing)
@@ -106,7 +140,7 @@ func (a affinity) Parse(ing *networking.Ingress) (interface{}, error) {
 	case "cookie":
 		cookie = a.cookieAffinityParse(ing)
 	default:
-		glog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name)
+		klog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name)
 	}
diff --git a/internal/ingress/annotations/sessionaffinity/main_test.go b/internal/ingress/annotations/sessionaffinity/main_test.go
index 3db53a2f78..e4de833df7 100644
--- a/internal/ingress/annotations/sessionaffinity/main_test.go
+++ b/internal/ingress/annotations/sessionaffinity/main_test.go
@@ -20,35 +20,35 @@ import (
 	"testing"
 
 	api "k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
+	networking "k8s.io/api/networking/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
 	"k8s.io/ingress-nginx/internal/ingress/resolver"
 )
 
-func buildIngress() *extensions.Ingress {
-	defaultBackend := extensions.IngressBackend{
+func buildIngress() *networking.Ingress {
+	defaultBackend := networking.IngressBackend{
 		ServiceName: "default-backend",
 		ServicePort: intstr.FromInt(80),
 	}
 
-	return &extensions.Ingress{
+	return &networking.Ingress{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      "foo",
 			Namespace: api.NamespaceDefault,
 		},
-		Spec: extensions.IngressSpec{
-			Backend: &extensions.IngressBackend{
+		Spec: networking.IngressSpec{
+			Backend: &networking.IngressBackend{
 				ServiceName: "default-backend",
 				ServicePort: intstr.FromInt(80),
 			},
-			Rules: []extensions.IngressRule{
+			Rules: []networking.IngressRule{
 				{
 					Host: "foo.bar.com",
-					IngressRuleValue: extensions.IngressRuleValue{
-						HTTP: &extensions.HTTPIngressRuleValue{
-							Paths: []extensions.HTTPIngressPath{
+					IngressRuleValue: networking.IngressRuleValue{
+						HTTP: &networking.HTTPIngressRuleValue{
+							Paths: []networking.HTTPIngressPath{
 								{
 									Path:    "/foo",
 									Backend: defaultBackend,
@@ -67,8 +67,11 @@ func TestIngressAffinityCookieConfig(t *testing.T) {
 	data := map[string]string{}
 	data[parser.GetAnnotationWithPrefix(annotationAffinityType)] = "cookie"
-	data[parser.GetAnnotationWithPrefix(annotationAffinityCookieHash)] = "sha123"
 	data[parser.GetAnnotationWithPrefix(annotationAffinityCookieName)] = "INGRESSCOOKIE"
+	data[parser.GetAnnotationWithPrefix(annotationAffinityCookieExpires)] = "4500"
+	data[parser.GetAnnotationWithPrefix(annotationAffinityCookieMaxAge)] = "3000"
+	data[parser.GetAnnotationWithPrefix(annotationAffinityCookiePath)] = "/foo"
+	data[parser.GetAnnotationWithPrefix(annotationAffinityCookieChangeOnFailure)] = "true"
 	ing.SetAnnotations(data)
 
 	affin, _ := NewParser(&resolver.Mock{}).Parse(ing)
@@ -78,14 +81,26 @@ func TestIngressAffinityCookieConfig(t *testing.T) {
 
 	if nginxAffinity.Type != "cookie" {
-		t.Errorf("expected cookie as sticky-type but returned %v", nginxAffinity.Type)
+		t.Errorf("expected cookie as affinity but returned %v", nginxAffinity.Type)
 	}
 
-	if nginxAffinity.Cookie.Hash != "md5" {
-		t.Errorf("expected md5 as sticky-hash but returned %v", nginxAffinity.Cookie.Hash)
+	if nginxAffinity.Cookie.Name != "INGRESSCOOKIE" {
+		t.Errorf("expected INGRESSCOOKIE as session-cookie-name but returned %v", nginxAffinity.Cookie.Name)
 	}
 
-	if nginxAffinity.Cookie.Name != "INGRESSCOOKIE" {
-		t.Errorf("expected route as sticky-name but returned %v", nginxAffinity.Cookie.Name)
+	if nginxAffinity.Cookie.Expires != "4500" {
+		t.Errorf("expected 4500 as session-cookie-expires but returned %v", nginxAffinity.Cookie.Expires)
+	}
+
+	if nginxAffinity.Cookie.MaxAge != "3000" {
+		t.Errorf("expected 3000 as session-cookie-max-age but returned %v", nginxAffinity.Cookie.MaxAge)
+	}
+
+	if nginxAffinity.Cookie.Path != "/foo" {
+		t.Errorf("expected /foo as session-cookie-path but returned %v", nginxAffinity.Cookie.Path)
+	}
+
+	if !nginxAffinity.Cookie.ChangeOnFailure {
+		t.Errorf("expected change-on-failure parameter set to true but returned %v", nginxAffinity.Cookie.ChangeOnFailure)
 	}
 }
diff --git a/internal/ingress/annotations/snippet/main.go b/internal/ingress/annotations/snippet/main.go
index c21fbad4dd..9a3878603c 100644
--- a/internal/ingress/annotations/snippet/main.go
+++ b/internal/ingress/annotations/snippet/main.go
@@ -17,7 +17,7 @@ limitations under the License.
package snippet import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -35,6 +35,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress rule // used to indicate if the location/s contains a fragment of // configuration to be included inside the paths of the rules -func (a snippet) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a snippet) Parse(ing *networking.Ingress) (interface{}, error) { return parser.GetStringAnnotation("configuration-snippet", ing) } diff --git a/internal/ingress/annotations/snippet/main_test.go b/internal/ingress/annotations/snippet/main_test.go index e197547cbd..0abeaed8a0 100644 --- a/internal/ingress/annotations/snippet/main_test.go +++ b/internal/ingress/annotations/snippet/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -44,12 +44,12 @@ func TestParse(t *testing.T) { {nil, ""}, } - ing := &extensions.Ingress{ + ing := &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{}, + Spec: networking.IngressSpec{}, } for _, testCase := range testCases { diff --git a/internal/ingress/annotations/sslcipher/main.go b/internal/ingress/annotations/sslcipher/main.go new file mode 100644 index 0000000000..267694fefb --- /dev/null +++ b/internal/ingress/annotations/sslcipher/main.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sslcipher + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type sslCipher struct { + r resolver.Resolver +} + +// NewParser creates a new sslCipher annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return sslCipher{r} +} + +// Parse parses the annotations contained in the ingress rule +// used to add ssl-ciphers to the server name +func (sc sslCipher) Parse(ing *networking.Ingress) (interface{}, error) { + return parser.GetStringAnnotation("ssl-ciphers", ing) +} diff --git a/internal/ingress/annotations/sslcipher/main_test.go b/internal/ingress/annotations/sslcipher/main_test.go new file mode 100644 index 0000000000..dbb0f500f8 --- /dev/null +++ b/internal/ingress/annotations/sslcipher/main_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sslcipher + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + annotation := parser.GetAnnotationWithPrefix("ssl-ciphers") + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP"}, "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP"}, + {map[string]string{annotation: "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"}, + "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"}, + {map[string]string{annotation: ""}, ""}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/annotations/sslpassthrough/main.go b/internal/ingress/annotations/sslpassthrough/main.go index 8ab5ab066e..20ff1a010d 100644 --- a/internal/ingress/annotations/sslpassthrough/main.go +++ b/internal/ingress/annotations/sslpassthrough/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package sslpassthrough import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" ing_errors "k8s.io/ingress-nginx/internal/ingress/errors" @@ -35,7 +35,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // ParseAnnotations parses the annotations contained in the ingress // rule used to indicate if is required to configure -func (a sslpt) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a sslpt) Parse(ing *networking.Ingress) (interface{}, error) { if ing.GetAnnotations() == nil { return false, ing_errors.ErrMissingAnnotations } diff --git a/internal/ingress/annotations/sslpassthrough/main_test.go b/internal/ingress/annotations/sslpassthrough/main_test.go index 575d338940..d5e54b2e2f 100644 --- a/internal/ingress/annotations/sslpassthrough/main_test.go +++ b/internal/ingress/annotations/sslpassthrough/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -28,14 +28,14 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -func buildIngress() *extensions.Ingress { - return &extensions.Ingress{ +func buildIngress() *networking.Ingress { + return &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{ - Backend: &extensions.IngressBackend{ + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ ServiceName: "default-backend", ServicePort: intstr.FromInt(80), }, @@ -61,7 +61,7 @@ func TestParseAnnotations(t *testing.T) { } // test with a valid host - ing.Spec.TLS = []extensions.IngressTLS{ + ing.Spec.TLS = []networking.IngressTLS{ { Hosts: []string{"foo.bar.com"}, }, diff --git a/internal/ingress/annotations/upstreamhashby/main.go b/internal/ingress/annotations/upstreamhashby/main.go index bfeb5afa58..bb202f1b05 100644 --- a/internal/ingress/annotations/upstreamhashby/main.go +++ b/internal/ingress/annotations/upstreamhashby/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package upstreamhashby import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -27,14 +27,27 @@ type upstreamhashby struct { r resolver.Resolver } -// NewParser creates a new CORS annotation parser +// Config contains the Consistent hash configuration to be used in the Ingress +type Config struct { + UpstreamHashBy string `json:"upstream-hash-by,omitempty"` + UpstreamHashBySubset bool `json:"upstream-hash-by-subset,omitempty"` + UpstreamHashBySubsetSize int `json:"upstream-hash-by-subset-size,omitempty"` +} + +// NewParser creates a new UpstreamHashBy annotation parser func NewParser(r resolver.Resolver) parser.IngressAnnotation { return upstreamhashby{r} } // Parse parses the annotations contained in the ingress rule -// used to indicate if the location/s contains a fragment of -// configuration to be included inside the paths of the rules -func (a upstreamhashby) Parse(ing *extensions.Ingress) (interface{}, error) { - return parser.GetStringAnnotation("upstream-hash-by", ing) +func (a upstreamhashby) Parse(ing *networking.Ingress) (interface{}, error) { + upstreamHashBy, _ := parser.GetStringAnnotation("upstream-hash-by", ing) + upstreamHashBySubset, _ := parser.GetBoolAnnotation("upstream-hash-by-subset", ing) + upstreamHashbySubsetSize, _ := parser.GetIntAnnotation("upstream-hash-by-subset-size", ing) + + if upstreamHashbySubsetSize == 0 { + upstreamHashbySubsetSize = 3 + } + + return &Config{upstreamHashBy, upstreamHashBySubset, upstreamHashbySubsetSize}, nil } diff --git a/internal/ingress/annotations/upstreamhashby/main_test.go b/internal/ingress/annotations/upstreamhashby/main_test.go index 0f4cee5186..e67803e51d 100644 --- a/internal/ingress/annotations/upstreamhashby/main_test.go +++ b/internal/ingress/annotations/upstreamhashby/main_test.go @@ -20,7 +20,7 @@ import ( "testing" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -44,18 +44,23 @@ func TestParse(t *testing.T) { {nil, ""}, } - ing := &extensions.Ingress{ + ing := &networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: extensions.IngressSpec{}, + Spec: networking.IngressSpec{}, } for _, testCase := range testCases { ing.SetAnnotations(testCase.annotations) result, _ := ap.Parse(ing) - if result != testCase.expected { + uc, ok := result.(*Config) + if !ok { + t.Fatalf("expected a Config type") + } + + if uc.UpstreamHashBy != testCase.expected { t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) } } diff --git a/internal/ingress/annotations/upstreamvhost/main.go b/internal/ingress/annotations/upstreamvhost/main.go index e11fe79140..bf761a70f8 100644 --- a/internal/ingress/annotations/upstreamvhost/main.go +++ b/internal/ingress/annotations/upstreamvhost/main.go @@ -17,7 +17,7 @@ limitations under the License. 
package upstreamvhost import ( - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -35,6 +35,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation { // Parse parses the annotations contained in the ingress rule // used to indicate if the location/s contains a fragment of // configuration to be included inside the paths of the rules -func (a upstreamVhost) Parse(ing *extensions.Ingress) (interface{}, error) { +func (a upstreamVhost) Parse(ing *networking.Ingress) (interface{}, error) { return parser.GetStringAnnotation("upstream-vhost", ing) } diff --git a/internal/ingress/annotations/upstreamvhost/main_test.go b/internal/ingress/annotations/upstreamvhost/main_test.go new file mode 100644 index 0000000000..1506c4f7fb --- /dev/null +++ b/internal/ingress/annotations/upstreamvhost/main_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package upstreamvhost + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("upstream-vhost")] = "ok.com" + + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + vhost, ok := i.(string) + if !ok { + t.Errorf("expected string but got %v", vhost) + } + if vhost != "ok.com" { + t.Errorf("expected %v but got %v", "ok.com", vhost) + } +} diff --git a/internal/ingress/annotations/vtsfilterkey/main.go b/internal/ingress/annotations/vtsfilterkey/main.go deleted file mode 100644 index e349c36a6d..0000000000 --- a/internal/ingress/annotations/vtsfilterkey/main.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vtsfilterkey - -import ( - extensions "k8s.io/api/extensions/v1beta1" - - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "k8s.io/ingress-nginx/internal/ingress/resolver" -) - -type vtsFilterKey struct { - r resolver.Resolver -} - -// NewParser creates a new vts filter key annotation parser -func NewParser(r resolver.Resolver) parser.IngressAnnotation { - return vtsFilterKey{r} -} - -// Parse parses the annotations contained in the ingress rule -// used to indicate if the location/s contains a fragment of -// configuration to be included inside the paths of the rules -func (a vtsFilterKey) Parse(ing *extensions.Ingress) (interface{}, error) { - return parser.GetStringAnnotation("vts-filter-key", ing) -} diff --git a/internal/ingress/annotations/xforwardedprefix/main.go b/internal/ingress/annotations/xforwardedprefix/main.go new file mode 100644 index 0000000000..2071b64113 --- /dev/null +++ b/internal/ingress/annotations/xforwardedprefix/main.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xforwardedprefix + +import ( + networking "k8s.io/api/networking/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type xforwardedprefix struct { + r resolver.Resolver +} + +// NewParser creates a new xforwardedprefix annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return xforwardedprefix{r} +} + +// Parse parses the annotations contained in the ingress rule +// used to add an x-forwarded-prefix header to the request +func (cbbs xforwardedprefix) Parse(ing *networking.Ingress) (interface{}, error) { + return parser.GetStringAnnotation("x-forwarded-prefix", ing) +} diff --git a/internal/ingress/annotations/xforwardedprefix/main_test.go b/internal/ingress/annotations/xforwardedprefix/main_test.go new file mode 100644 index 0000000000..c94df3ab2d --- /dev/null +++ b/internal/ingress/annotations/xforwardedprefix/main_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package xforwardedprefix + +import ( + "testing" + + api "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + annotation := parser.GetAnnotationWithPrefix("x-forwarded-prefix") + ap := NewParser(&resolver.Mock{}) + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "true"}, "true"}, + {map[string]string{annotation: "1"}, "1"}, + {map[string]string{annotation: ""}, ""}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &networking.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: networking.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/internal/ingress/controller/backend_ssl.go b/internal/ingress/controller/backend_ssl.go deleted file mode 100644 index c48a34d54f..0000000000 --- a/internal/ingress/controller/backend_ssl.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "io/ioutil" - "strings" - - "github.com/golang/glog" - "github.com/imdario/mergo" - - apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - - "k8s.io/ingress-nginx/internal/ingress" - "k8s.io/ingress-nginx/internal/ingress/annotations/class" - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "k8s.io/ingress-nginx/internal/net/ssl" -) - -// syncSecret keeps in sync Secrets used by Ingress rules with the files on -// disk to allow copy of the content of the secret to disk to be used -// by external processes. 
-func (ic *NGINXController) syncSecret(key string) { - glog.V(3).Infof("starting syncing of secret %v", key) - - cert, err := ic.getPemCertificate(key) - if err != nil { - glog.Warningf("error obtaining PEM from secret %v: %v", key, err) - return - } - - // create certificates and add or update the item in the store - cur, exists := ic.sslCertTracker.Get(key) - if exists { - s := cur.(*ingress.SSLCert) - if s.Equal(cert) { - // no need to update - return - } - glog.Infof("updating secret %v in the local store", key) - ic.sslCertTracker.Update(key, cert) - // this update must trigger an update - // (like an update event from a change in Ingress) - ic.syncQueue.Enqueue(&extensions.Ingress{}) - return - } - - glog.Infof("adding secret %v to the local store", key) - ic.sslCertTracker.Add(key, cert) - // this update must trigger an update - // (like an update event from a change in Ingress) - ic.syncQueue.Enqueue(&extensions.Ingress{}) -} - -// getPemCertificate receives a secret, and creates a ingress.SSLCert as return. -// It parses the secret and verifies if it's a keypair, or a 'ca.crt' secret only. -func (ic *NGINXController) getPemCertificate(secretName string) (*ingress.SSLCert, error) { - secret, err := ic.listers.Secret.GetByName(secretName) - if err != nil { - return nil, fmt.Errorf("error retrieving secret %v: %v", secretName, err) - } - - cert, okcert := secret.Data[apiv1.TLSCertKey] - key, okkey := secret.Data[apiv1.TLSPrivateKeyKey] - ca := secret.Data["ca.crt"] - - // namespace/secretName -> namespace-secretName - nsSecName := strings.Replace(secretName, "/", "-", -1) - - var s *ingress.SSLCert - if okcert && okkey { - if cert == nil { - return nil, fmt.Errorf("secret %v has no 'tls.crt'", secretName) - } - if key == nil { - return nil, fmt.Errorf("secret %v has no 'tls.key'", secretName) - } - - // If 'ca.crt' is also present, it will allow this secret to be used in the - // 'nginx.ingress.kubernetes.io/auth-tls-secret' annotation - s, err = ssl.AddOrUpdateCertAndKey(nsSecName, cert, key, ca) - if err != nil { - return nil, fmt.Errorf("unexpected error creating pem file: %v", err) - } - - glog.V(3).Infof("found 'tls.crt' and 'tls.key', configuring %v as a TLS Secret (CN: %v)", secretName, s.CN) - if ca != nil { - glog.V(3).Infof("found 'ca.crt', secret %v can also be used for Certificate Authentication", secretName) - } - - } else if ca != nil { - s, err = ssl.AddCertAuth(nsSecName, ca) - - if err != nil { - return nil, fmt.Errorf("unexpected error creating pem file: %v", err) - } - - // makes this secret in 'syncSecret' to be used for Certificate Authentication - // this does not enable Certificate Authentication - glog.V(3).Infof("found only 'ca.crt', configuring %v as an Certificate Authentication Secret", secretName) - - } else { - return nil, fmt.Errorf("no keypair or CA cert could be found in %v", secretName) - } - - s.Name = secret.Name - s.Namespace = secret.Namespace - return s, nil -} - -func (ic *NGINXController) checkSSLChainIssues() { - for _, secretName := range ic.sslCertTracker.ListKeys() { - s, _ := ic.sslCertTracker.Get(secretName) - secret := s.(*ingress.SSLCert) - - if secret.FullChainPemFileName != "" { - // chain already checked - continue - } - - data, err := ssl.FullChainCert(secret.PemFileName) - if err != nil { - glog.Errorf("unexpected error generating SSL certificate with full intermediate chain CA certs: %v", err) - continue - } - - fullChainPemFileName := fmt.Sprintf("%v/%v-%v-full-chain.pem", ingress.DefaultSSLDirectory, secret.Namespace, 
secret.Name) - err = ioutil.WriteFile(fullChainPemFileName, data, 0655) - if err != nil { - glog.Errorf("unexpected error creating SSL certificate: %v", err) - continue - } - - dst := &ingress.SSLCert{} - - err = mergo.MergeWithOverwrite(dst, secret) - if err != nil { - glog.Errorf("unexpected error creating SSL certificate: %v", err) - continue - } - - dst.FullChainPemFileName = fullChainPemFileName - - glog.Infof("updating local copy of ssl certificate %v with missing intermediate CA certs", secretName) - ic.sslCertTracker.Update(secretName, dst) - // this update must trigger an update - // (like an update event from a change in Ingress) - ic.syncQueue.Enqueue(&extensions.Ingress{}) - } -} - -// checkMissingSecrets verify if one or more ingress rules contains a reference -// to a secret that is not present in the local secret store. -// In this case we call syncSecret. -func (ic *NGINXController) checkMissingSecrets() { - for _, obj := range ic.listers.Ingress.List() { - ing := obj.(*extensions.Ingress) - - if !class.IsValid(ing) { - continue - } - - for _, tls := range ing.Spec.TLS { - if tls.SecretName == "" { - continue - } - - key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName) - if _, ok := ic.sslCertTracker.Get(key); !ok { - ic.syncSecret(key) - } - } - - key, _ := parser.GetStringAnnotation("auth-tls-secret", ing) - if key == "" { - continue - } - - if _, ok := ic.sslCertTracker.Get(key); !ok { - ic.syncSecret(key) - } - } -} diff --git a/internal/ingress/controller/certificate.go b/internal/ingress/controller/certificate.go index cec4206598..e8707c716e 100644 --- a/internal/ingress/controller/certificate.go +++ b/internal/ingress/controller/certificate.go @@ -1,3 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
 package controller
 
 import (
diff --git a/internal/ingress/controller/checker.go b/internal/ingress/controller/checker.go
index cc40fd41db..b2beaa79d0 100644
--- a/internal/ingress/controller/checker.go
+++ b/internal/ingress/controller/checker.go
@@ -24,6 +24,9 @@ import (
 
 	"github.com/ncabatoff/process-exporter/proc"
 	"github.com/pkg/errors"
+	"k8s.io/klog"
+
+	"k8s.io/ingress-nginx/internal/nginx"
 )
 
 // Name returns the healthcheck name
@@ -33,29 +36,41 @@ func (n NGINXController) Name() string {
 
 // Check returns if the nginx healthz endpoint is returning ok (status code 200)
 func (n *NGINXController) Check(_ *http.Request) error {
-	res, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v%v", n.cfg.ListenPorts.Status, ngxHealthPath))
+	statusCode, _, err := nginx.NewGetStatusRequest(nginx.HealthPath)
 	if err != nil {
+		klog.Errorf("healthcheck error: %v", err)
 		return err
 	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
+
+	if statusCode != 200 {
+		klog.Errorf("healthcheck error: %v", statusCode)
 		return fmt.Errorf("ingress controller is not healthy")
 	}
 
+	statusCode, _, err = nginx.NewGetStatusRequest("/is-dynamic-lb-initialized")
+	if err != nil {
+		klog.Errorf("healthcheck error: %v", err)
+		return err
+	}
+
+	if statusCode != 200 {
+		klog.Errorf("healthcheck error: %v", statusCode)
+		return fmt.Errorf("dynamic load balancer not started")
+	}
+
 	// check the nginx master process is running
-	fs, err := proc.NewFS("/proc")
+	fs, err := proc.NewFS("/proc", false)
 	if err != nil {
 		return errors.Wrap(err, "unexpected error reading /proc directory")
 	}
-	f, err := n.fileSystem.ReadFile("/run/nginx.pid")
+	f, err := n.fileSystem.ReadFile(nginx.PID)
 	if err != nil {
-		return errors.Wrap(err, "unexpected error reading /run/nginx.pid")
+		return errors.Wrapf(err, "unexpected error reading %v", nginx.PID)
 	}
 	pid, err := strconv.Atoi(strings.TrimRight(string(f), "\r\n"))
 	if err != nil {
-		return errors.Wrap(err, "unexpected error reading the PID from /run/nginx.pid")
+		return errors.Wrapf(err, "unexpected error reading the nginx PID from %v", nginx.PID)
 	}
 	_, err = fs.NewProc(pid)
-
 	return err
 }
diff --git a/internal/ingress/controller/checker_test.go b/internal/ingress/controller/checker_test.go
index 51002a5eab..290c12ea15 100644
--- a/internal/ingress/controller/checker_test.go
+++ b/internal/ingress/controller/checker_test.go
@@ -21,54 +21,66 @@ import (
 	"net"
 	"net/http"
 	"net/http/httptest"
+	"os"
 	"os/exec"
 	"testing"
 
 	"k8s.io/apiserver/pkg/server/healthz"
 	"k8s.io/kubernetes/pkg/util/filesystem"
 
+	"k8s.io/ingress-nginx/internal/file"
 	ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
+	"k8s.io/ingress-nginx/internal/nginx"
 )
 
 func TestNginxCheck(t *testing.T) {
 	mux := http.NewServeMux()
 
-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(http.StatusOK)
-		fmt.Fprintf(w, "ok")
-	}))
+	listener, err := net.Listen("unix", nginx.StatusSocket)
+	if err != nil {
+		t.Errorf("creating unix listener: %s", err)
+	}
+	defer listener.Close()
+	defer os.Remove(nginx.StatusSocket)
+
+	server := &httptest.Server{
+		Listener: listener,
+		Config: &http.Server{
+			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+				fmt.Fprintf(w, "ok")
+			}),
+		},
+	}
 	defer server.Close()
 
-	// port to be used in the check
-	p := server.Listener.Addr().(*net.TCPAddr).Port
+	server.Start()
 
 	// mock filesystem
-	fs := filesystem.NewFakeFs()
+	fs := filesystem.DefaultFs{}
 
 	n := &NGINXController{
cfg: &Configuration{ - ListenPorts: &ngx_config.ListenPorts{ - Status: p, - }, + ListenPorts: &ngx_config.ListenPorts{}, }, fileSystem: fs, } t.Run("no pid or process", func(t *testing.T) { if err := callHealthz(true, mux); err == nil { - t.Errorf("expected an error but none returned") + t.Error("expected an error but none returned") } }) - // create required files - fs.MkdirAll("/run", 0655) - pidFile, err := fs.Create("/run/nginx.pid") + // create pid file + fs.MkdirAll("/tmp", file.ReadWriteByUser) + pidFile, err := fs.Create(nginx.PID) if err != nil { t.Fatalf("unexpected error: %v", err) } t.Run("no process", func(t *testing.T) { if err := callHealthz(true, mux); err == nil { - t.Errorf("expected an error but none returned") + t.Error("expected an error but none returned") } }) @@ -92,32 +104,23 @@ func TestNginxCheck(t *testing.T) { } }) - pidFile, err = fs.Create("/run/nginx.pid") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - pidFile.Write([]byte(fmt.Sprintf("%v", pid))) + // pollute pid file + pidFile.Write([]byte(fmt.Sprint("999999"))) pidFile.Close() - t.Run("valid request", func(t *testing.T) { + t.Run("bad pid", func(t *testing.T) { if err := callHealthz(true, mux); err == nil { - t.Errorf("expected an error but none returned") - } - }) - - t.Run("invalid port", func(t *testing.T) { - n.cfg.ListenPorts.Status = 9000 - if err := callHealthz(true, mux); err == nil { - t.Errorf("expected an error but none returned") + t.Error("expected an error but none returned") } }) } func callHealthz(expErr bool, mux *http.ServeMux) error { - req, err := http.NewRequest("GET", "http://localhost:8080/healthz", nil) + req, err := http.NewRequest("GET", "/healthz", nil) if err != nil { - return err + return fmt.Errorf("healthz error: %v", err) } + w := httptest.NewRecorder() mux.ServeHTTP(w, req) diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index 05ff73bec0..f4f7897a8e 100644 --- a/internal/ingress/controller/config/config.go +++ b/internal/ingress/controller/config/config.go @@ -18,15 +18,16 @@ package config import ( "fmt" - "runtime" "strconv" + "time" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" "k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress/defaults" + "k8s.io/ingress-nginx/internal/runtime" ) const ( @@ -45,11 +46,11 @@ const ( // max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. 
hstsMaxAge = "15724800" - gzipTypes = "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component" + gzipTypes = "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component" - brotliTypes = "application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component" + brotliTypes = "application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component" - logFormatUpstream = `%v - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status` + logFormatUpstream = `%v - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id` logFormatStream = `[$time_local] $protocol $status $bytes_sent $bytes_received $session_time` @@ -75,9 +76,6 @@ const ( // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache sslSessionCacheSize = "10m" - // Default setting for load balancer algorithm - defaultLoadBalancerAlgorithm = "least_conn" - // Parameters for a shared memory zone that will keep states for various keys. 
// http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone defaultLimitConnZoneVariable = "$binary_remote_addr" @@ -96,11 +94,24 @@ type Configuration struct { // By default this is disabled AllowBackendServerHeader bool `json:"allow-backend-server-header"` + // AccessLogParams sets additional parameters for access_log + // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + // By default it's empty + AccessLogParams string `json:"access-log-params,omitempty"` + + // EnableAccessLogForDefaultBackend enables access_log for the default backend + // By default this is disabled + EnableAccessLogForDefaultBackend bool `json:"enable-access-log-for-default-backend"` + // AccessLogPath sets the path of the access logs if enabled + // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + // By default access logs go to /var/log/nginx/access.log AccessLogPath string `json:"access-log-path,omitempty"` + // WorkerCPUAffinity binds nginx worker processes to CPUs, which can improve response latency + // http://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity + // By default this is disabled + WorkerCPUAffinity string `json:"worker-cpu-affinity,omitempty"` // ErrorLogPath sets the path of the error logs // http://nginx.org/en/docs/ngx_core_module.html#error_log // By default error logs go to /var/log/nginx/error.log @@ -115,7 +126,7 @@ type Configuration struct { // By default this is disabled EnableModsecurity bool `json:"enable-modsecurity"` - // EnableModsecurity enables the OWASP ModSecurity Core Rule Set (CRS) + // EnableOWASPCoreRules enables the OWASP ModSecurity Core Rule Set (CRS) // By default this is disabled EnableOWASPCoreRules bool `json:"enable-owasp-modsecurity-crs"` @@ -140,6 +151,9 @@ type Configuration struct { //http://nginx.org/en/docs/http/ngx_http_log_module.html DisableAccessLog bool `json:"disable-access-log,omitempty"` + // DisableIpv6DNS disables IPv6 for the nginx resolver + DisableIpv6DNS bool `json:"disable-ipv6-dns"` + // DisableIpv6 disable listening on ipv6 address DisableIpv6 bool `json:"disable-ipv6,omitempty"` @@ -153,26 +167,6 @@ type Configuration struct { // By default this is enabled IgnoreInvalidHeaders bool `json:"ignore-invalid-headers"` - // EnableVtsStatus allows the replacement of the default status page with a third party module named - // nginx-module-vts - https://github.com/vozlt/nginx-module-vts - // By default this is disabled - EnableVtsStatus bool `json:"enable-vts-status,omitempty"` - - // Vts config on http level - // Description: Sets parameters for a shared memory zone that will keep states for various keys. The cache is shared between all worker processe - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone - // Default value is 10m - VtsStatusZoneSize string `json:"vts-status-zone-size,omitempty"` - - // Vts config on http level - // Description: Enables the keys by user defined variable. The key is a key string to calculate traffic. - // The name is a group string to calculate traffic. The key and name can contain variables such as $host, - // $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones - // if not specified second argument name.
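Each json tag in the struct above doubles as the ConfigMap key that users set. A trimmed-down sketch of that mapping using plain encoding/json; the controller's real ConfigMap parser is more elaborate, and the values shown are hypothetical:

package main

import (
	"encoding/json"
	"fmt"
)

// Two fields copied from the Configuration struct above, with their real tags.
type miniConfig struct {
	AccessLogParams   string `json:"access-log-params,omitempty"`
	WorkerCPUAffinity string `json:"worker-cpu-affinity,omitempty"`
}

func main() {
	// Keys as they would appear in the controller's ConfigMap.
	raw := []byte(`{"access-log-params": "buffer=16k flush=5s", "worker-cpu-affinity": "auto"}`)

	var c miniConfig
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}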
The example with geoip module is as follows: - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key - // Default value is $geoip_country_code country::* - VtsDefaultFilterKey string `json:"vts-default-filter-key,omitempty"` - // RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) // in case of an error. The previous behavior can be restored using the value true RetryNonIdempotent bool `json:"retry-non-idempotent"` @@ -190,6 +184,12 @@ type Configuration struct { // HTTP2MaxHeaderSize Limits the maximum size of the entire request header list after HPACK decompression HTTP2MaxHeaderSize string `json:"http2-max-header-size,omitempty"` + // http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests + // HTTP2MaxRequests Sets the maximum number of requests (including push requests) that can be served + // through one HTTP/2 connection, after which the next client request will lead to connection closing + // and the need of establishing a new connection. + HTTP2MaxRequests int `json:"http2-max-requests,omitempty"` + // Enables or disables the header HSTS in servers running SSL HSTS bool `json:"hsts,omitempty"` @@ -234,15 +234,30 @@ type Configuration struct { // http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format LogFormatStream string `json:"log-format-stream,omitempty"` + // If disabled, a worker process will accept one new connection at a time. + // Otherwise, a worker process will accept all new connections at a time. + // http://nginx.org/en/docs/ngx_core_module.html#multi_accept + // Default: true + EnableMultiAccept bool `json:"enable-multi-accept,omitempty"` + // Maximum number of simultaneous connections that can be opened by each worker process // http://nginx.org/en/docs/ngx_core_module.html#worker_connections MaxWorkerConnections int `json:"max-worker-connections,omitempty"` + // Maximum number of files that can be opened by each worker process. + // http://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile + MaxWorkerOpenFiles int `json:"max-worker-open-files,omitempty"` + // Sets the bucket size for the map variables hash tables. // Default value depends on the processor’s cache line size. // http://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size MapHashBucketSize int `json:"map-hash-bucket-size,omitempty"` + // NginxStatusIpv4Whitelist has the list of cidr that are allowed to access + // the /nginx_status endpoint of the "_" server + NginxStatusIpv4Whitelist []string `json:"nginx-status-ipv4-whitelist,omitempty"` + NginxStatusIpv6Whitelist []string `json:"nginx-status-ipv6-whitelist,omitempty"` + // If UseProxyProtocol is enabled ProxyRealIPCIDR defines the default the IP/network address // of your external load balancer ProxyRealIPCIDR []string `json:"proxy-real-ip-cidr,omitempty"` @@ -286,7 +301,7 @@ type Configuration struct { SSLECDHCurve string `json:"ssl-ecdh-curve,omitempty"` // The secret that contains Diffie-Hellman key to help with "Perfect Forward Secrecy" - // https://www.openssl.org/docs/manmaster/apps/dhparam.html + // https://wiki.openssl.org/index.php/Diffie-Hellman_parameters // https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam SSLDHParam string `json:"ssl-dh-param,omitempty"` @@ -310,7 +325,7 @@ type Configuration struct { // Sets the secret key used to encrypt and decrypt TLS session tickets. 
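The NginxStatusIpv4Whitelist/NginxStatusIpv6Whitelist fields above end up as access rules on the /nginx_status location of the "_" server. A sketch of that rendering; allow/deny are nginx's directives, but the loop and layout are illustrative rather than the controller's actual template:

package main

import (
	"fmt"
	"strings"
)

// renderStatusACL turns whitelisted CIDRs into nginx access directives.
func renderStatusACL(v4, v6 []string) string {
	var b strings.Builder
	for _, cidr := range append(append([]string{}, v4...), v6...) {
		fmt.Fprintf(&b, "allow %s;\n", cidr)
	}
	b.WriteString("deny all;\n") // everything else is rejected
	return b.String()
}

func main() {
	// The defaults set in NewDefault below: loopback only.
	fmt.Print(renderStatusACL([]string{"127.0.0.1"}, []string{"::1"}))
}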
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets // By default, a randomly generated key is used. - // Example: openssl rand 80 | base64 -w0 + // Example: openssl rand 80 | openssl enc -A -base64 SSLSessionTicketKey string `json:"ssl-session-ticket-key,omitempty"` // Time during which a client may reuse the session parameters stored in a cache. @@ -329,10 +344,23 @@ type Configuration struct { // https://www.nginx.com/resources/admin-guide/proxy-protocol/ UseProxyProtocol bool `json:"use-proxy-protocol,omitempty"` + // When use-proxy-protocol is enabled, sets the maximum time the connection handler will wait + // to receive proxy headers. + // Example '60s' + ProxyProtocolHeaderTimeout time.Duration `json:"proxy-protocol-header-timeout,omitempty"` + // Enables or disables the use of the nginx module that compresses responses using the "gzip" method // http://nginx.org/en/docs/http/ngx_http_gzip_module.html UseGzip bool `json:"use-gzip,omitempty"` + // Enables or disables the use of the nginx geoip module that creates variables with values depending on the client IP + // http://nginx.org/en/docs/http/ngx_http_geoip_module.html + UseGeoIP bool `json:"use-geoip,omitempty"` + + // UseGeoIP2 enables the geoip2 module for NGINX + // By default this is disabled + UseGeoIP2 bool `json:"use-geoip2,omitempty"` + // Enables or disables the use of the NGINX Brotli Module for compression // https://github.com/google/ngx_brotli EnableBrotli bool `json:"enable-brotli,omitempty"` @@ -348,6 +376,9 @@ type Configuration struct { // Default: true UseHTTP2 bool `json:"use-http2,omitempty"` + // gzip Compression Level that will be used + GzipLevel int `json:"gzip-level,omitempty"` + // MIME types in addition to "text/html" to compress. The special value “*” matches any MIME type. // Responses with the “text/html” type are always compressed if UseGzip is enabled GzipTypes string `json:"gzip-types,omitempty"` @@ -360,9 +391,6 @@ type Configuration struct { // http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout WorkerShutdownTimeout string `json:"worker-shutdown-timeout,omitempty"` - // Defines the load balancing algorithm to use. The deault is round-robin - LoadBalanceAlgorithm string `json:"load-balance,omitempty"` - // Sets the bucket size for the variables hash table. // http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size VariablesHashBucketSize int `json:"variables-hash-bucket-size,omitempty"` @@ -376,9 +404,17 @@ type Configuration struct { // upstream servers that are preserved in the cache of each worker process. When this // number is exceeded, the least recently used connections are closed. // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive - // Default: 32 UpstreamKeepaliveConnections int `json:"upstream-keepalive-connections,omitempty"` + // Sets a timeout during which an idle keepalive connection to an upstream server will stay open. + // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_timeout + UpstreamKeepaliveTimeout int `json:"upstream-keepalive-timeout,omitempty"` + + // Sets the maximum number of requests that can be served through one keepalive connection. + // After the maximum number of requests is made, the connection is closed. + // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_requests + UpstreamKeepaliveRequests int `json:"upstream-keepalive-requests,omitempty"` + // Sets the maximum size of the variables hash table. 
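The three upstream keepalive settings above map one-to-one onto nginx upstream-block directives. A sketch of the block they produce, using the defaults from NewDefault below; the server address is hypothetical and the rendering is illustrative:

package main

import "fmt"

func main() {
	connections, timeout, requests := 32, 60, 100

	// Directive names are nginx's; values come from the three fields above.
	fmt.Println("upstream backend {")
	fmt.Println("    server 10.0.0.1:8080;")
	fmt.Printf("    keepalive %d;\n", connections)
	fmt.Printf("    keepalive_timeout %ds;\n", timeout)
	fmt.Printf("    keepalive_requests %d;\n", requests)
	fmt.Println("}")
}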
// http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size LimitConnZoneVariable string `json:"limit-conn-zone-variable,omitempty"` @@ -400,6 +436,9 @@ type Configuration struct { // Sets the ipv6 addresses on which the server will accept requests. BindAddressIpv6 []string `json:"bind-address-ipv6,omitempty"` + // Sets whether to use incoming X-Forwarded headers. + UseForwardedHeaders bool `json:"use-forwarded-headers"` + // Sets the header field for identifying the originating IP address of a client // Default is X-Forwarded-For ForwardedForHeader string `json:"forwarded-for-header,omitempty"` @@ -408,8 +447,16 @@ type Configuration struct { // Default: false ComputeFullForwardedFor bool `json:"compute-full-forwarded-for,omitempty"` + // If the request does not have a request-id, should we generate a random value? + // Default: true + GenerateRequestID bool `json:"generate-request-id,omitempty"` + + // Adds an X-Original-Uri header with the original request URI to the backend request + // Default: true + ProxyAddOriginalURIHeader bool `json:"proxy-add-original-uri-header"` + // EnableOpentracing enables the nginx Opentracing extension - // https://github.com/rnburn/nginx-opentracing + // https://github.com/opentracing-contrib/nginx-opentracing // By default this is disabled EnableOpentracing bool `json:"enable-opentracing"` @@ -417,12 +464,63 @@ type Configuration struct { ZipkinCollectorHost string `json:"zipkin-collector-host"` // ZipkinCollectorPort specifies the port to use when uploading traces + // Default: 9411 ZipkinCollectorPort int `json:"zipkin-collector-port"` // ZipkinServiceName specifies the service name to use for any traces created // Default: nginx ZipkinServiceName string `json:"zipkin-service-name"` + // ZipkinSampleRate specifies sampling rate for traces + // Default: 1.0 + ZipkinSampleRate float32 `json:"zipkin-sample-rate"` + + // JaegerCollectorHost specifies the host to use when uploading traces + JaegerCollectorHost string `json:"jaeger-collector-host"` + + // JaegerCollectorPort specifies the port to use when uploading traces + // Default: 6831 + JaegerCollectorPort int `json:"jaeger-collector-port"` + + // JaegerServiceName specifies the service name to use for any traces created + // Default: nginx + JaegerServiceName string `json:"jaeger-service-name"` + + // JaegerSamplerType specifies the sampler to be used when sampling traces. 
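For intuition on the sampler fields above: a probabilistic sampler with param 0.1 keeps roughly one trace in ten. A standalone sketch of that behaviour; the real decision happens inside the tracing plugin, not in the controller:

package main

import (
	"fmt"
	"math/rand"
)

// sampled mimics a probabilistic sampler: keep a trace with probability p.
func sampled(p float64) bool {
	return rand.Float64() < p
}

func main() {
	kept := 0
	for i := 0; i < 10000; i++ {
		if sampled(0.1) {
			kept++
		}
	}
	fmt.Printf("kept %d of 10000 traces\n", kept) // roughly 1000
}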
+ // The available samplers are: const, probabilistic, ratelimiting, remote + // Default: const + JaegerSamplerType string `json:"jaeger-sampler-type"` + + // JaegerSamplerParam specifies the argument to be passed to the sampler constructor + // Default: 1 + JaegerSamplerParam string `json:"jaeger-sampler-param"` + + // JaegerSamplerHost specifies the host used for remote sampling consultation + // Default: http://127.0.0.1 + JaegerSamplerHost string `json:"jaeger-sampler-host"` + + // JaegerSamplerPort specifies the port used for remote sampling consultation + // Default: 5778 + JaegerSamplerPort int `json:"jaeger-sampler-port"` + + // DatadogCollectorHost specifies the datadog agent host to use when uploading traces + DatadogCollectorHost string `json:"datadog-collector-host"` + + // DatadogCollectorPort specifies the port to use when uploading traces + // Default: 8126 + DatadogCollectorPort int `json:"datadog-collector-port"` + + // DatadogServiceName specifies the service name to use for any traces created + // Default: nginx + DatadogServiceName string `json:"datadog-service-name"` + + // DatadogOperationNameOverride overrides the operation name to use for any traces created + // Default: nginx.handle + DatadogOperationNameOverride string `json:"datadog-operation-name-override"` + + // MainSnippet adds custom configuration to the main section of the nginx configuration + MainSnippet string `json:"main-snippet"` + // HTTPSnippet adds custom configuration to the http section of the nginx configuration HTTPSnippet string `json:"http-snippet"` @@ -436,97 +534,208 @@ type Configuration struct { // Supported codes are 301,302,307 and 308 // Default: 308 HTTPRedirectCode int `json:"http-redirect-code"` + + // ReusePort instructs NGINX to create an individual listening socket for + // each worker process (using the SO_REUSEPORT socket option), allowing a + // kernel to distribute incoming connections between worker processes + // Default: true + ReusePort bool `json:"reuse-port"` + + // HideHeaders sets additional headers that will not be passed from the upstream + // server to the client response + // Default: empty + HideHeaders []string `json:"hide-headers"` + + // LimitReqStatusCode Sets the status code to return in response to rejected requests. + // http://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status + // Default: 503 + LimitReqStatusCode int `json:"limit-req-status-code"` + + // LimitConnStatusCode Sets the status code to return in response to rejected connections.
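Each entry of HideHeaders above becomes one proxy_hide_header directive in the rendered configuration. A sketch with hypothetical header names; proxy_hide_header is nginx's directive, the loop is illustrative:

package main

import "fmt"

func main() {
	// Hypothetical values for the hide-headers ConfigMap key.
	hide := []string{"X-Internal-Token", "X-Backend-Version"}

	for _, h := range hide {
		fmt.Printf("proxy_hide_header %s;\n", h)
	}
}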
+ // http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_status + // Default: 503 + LimitConnStatusCode int `json:"limit-conn-status-code"` + + // EnableSyslog enables the configuration for remote logging in NGINX + EnableSyslog bool `json:"enable-syslog"` + // SyslogHost FQDN or IP address where the logs should be sent + SyslogHost string `json:"syslog-host"` + // SyslogPort port + SyslogPort int `json:"syslog-port"` + + // NoTLSRedirectLocations is a comma-separated list of locations + // that should not get redirected to TLS + NoTLSRedirectLocations string `json:"no-tls-redirect-locations"` + + // NoAuthLocations is a comma-separated list of locations that + // should not get authenticated + NoAuthLocations string `json:"no-auth-locations"` + + // GlobalExternalAuth indicates the access to all locations requires + // authentication using an external provider + // +optional + GlobalExternalAuth GlobalExternalAuth `json:"global-external-auth"` + + // DisableLuaRestyWAF disables lua-resty-waf globally regardless + // of whether there's an ingress that has enabled the WAF using annotation + DisableLuaRestyWAF bool `json:"disable-lua-resty-waf"` + + // EnableInfluxDB enables the nginx InfluxDB extension + // http://github.com/influxdata/nginx-influxdb-module/ + // By default this is disabled + EnableInfluxDB bool `json:"enable-influxdb"` + + // Checksum contains a checksum of the configmap configuration + Checksum string `json:"-"` + + // Block all requests from given IPs + BlockCIDRs []string `json:"block-cidrs"` + + // Block all requests with given User-Agent headers + BlockUserAgents []string `json:"block-user-agents"` + + // Block all requests with given Referer headers + BlockReferers []string `json:"block-referers"` } // NewDefault returns the default nginx configuration func NewDefault() Configuration { defIPCIDR := make([]string, 0) - defIPCIDR = append(defIPCIDR, "0.0.0.0/0") defBindAddress := make([]string, 0) + defBlockEntity := make([]string, 0) + defNginxStatusIpv4Whitelist := make([]string, 0) + defNginxStatusIpv6Whitelist := make([]string, 0) + defResponseHeaders := make([]string, 0) + + defIPCIDR = append(defIPCIDR, "0.0.0.0/0") + defNginxStatusIpv4Whitelist = append(defNginxStatusIpv4Whitelist, "127.0.0.1") + defNginxStatusIpv6Whitelist = append(defNginxStatusIpv6Whitelist, "::1") + defProxyDeadlineDuration := time.Duration(5) * time.Second + defGlobalExternalAuth := GlobalExternalAuth{"", "", "", "", append(defResponseHeaders, ""), "", ""} + cfg := Configuration{ - AllowBackendServerHeader: false, - AccessLogPath: "/var/log/nginx/access.log", - ErrorLogPath: "/var/log/nginx/error.log", - BrotliLevel: 4, - BrotliTypes: brotliTypes, - ClientHeaderBufferSize: "1k", - ClientHeaderTimeout: 60, - ClientBodyBufferSize: "8k", - ClientBodyTimeout: 60, - EnableDynamicTLSRecords: true, - EnableUnderscoresInHeaders: false, - ErrorLogLevel: errorLevel, - ForwardedForHeader: "X-Forwarded-For", - ComputeFullForwardedFor: false, - HTTP2MaxFieldSize: "4k", - HTTP2MaxHeaderSize: "16k", - HTTPRedirectCode: 308, - HSTS: true, - HSTSIncludeSubdomains: true, - HSTSMaxAge: hstsMaxAge, - HSTSPreload: false, - IgnoreInvalidHeaders: true, - GzipTypes: gzipTypes, - KeepAlive: 75, - KeepAliveRequests: 100, - LargeClientHeaderBuffers: "4 8k", - LogFormatEscapeJSON: false, - LogFormatStream: logFormatStream, - LogFormatUpstream: logFormatUpstream, - MaxWorkerConnections: 16384, - MapHashBucketSize: 64, - ProxyRealIPCIDR: defIPCIDR, - ServerNameHashMaxSize: 1024, - 
ProxyHeadersHashMaxSize: 512, - ProxyHeadersHashBucketSize: 64, - ProxyStreamResponses: 1, - ShowServerTokens: true, - SSLBufferSize: sslBufferSize, - SSLCiphers: sslCiphers, - SSLECDHCurve: "auto", - SSLProtocols: sslProtocols, - SSLSessionCache: true, - SSLSessionCacheSize: sslSessionCacheSize, - SSLSessionTickets: true, - SSLSessionTimeout: sslSessionTimeout, - EnableBrotli: true, - UseGzip: true, - WorkerProcesses: strconv.Itoa(runtime.NumCPU()), - WorkerShutdownTimeout: "10s", - LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm, - VtsStatusZoneSize: "10m", - VtsDefaultFilterKey: "$geoip_country_code country::*", - VariablesHashBucketSize: 128, - VariablesHashMaxSize: 2048, - UseHTTP2: true, - ProxyStreamTimeout: "600s", + AllowBackendServerHeader: false, + AccessLogPath: "/var/log/nginx/access.log", + AccessLogParams: "", + EnableAccessLogForDefaultBackend: false, + WorkerCPUAffinity: "", + ErrorLogPath: "/var/log/nginx/error.log", + BlockCIDRs: defBlockEntity, + BlockUserAgents: defBlockEntity, + BlockReferers: defBlockEntity, + BrotliLevel: 4, + BrotliTypes: brotliTypes, + ClientHeaderBufferSize: "1k", + ClientHeaderTimeout: 60, + ClientBodyBufferSize: "8k", + ClientBodyTimeout: 60, + EnableDynamicTLSRecords: true, + EnableUnderscoresInHeaders: false, + ErrorLogLevel: errorLevel, + UseForwardedHeaders: false, + ForwardedForHeader: "X-Forwarded-For", + ComputeFullForwardedFor: false, + ProxyAddOriginalURIHeader: true, + GenerateRequestID: true, + HTTP2MaxFieldSize: "4k", + HTTP2MaxHeaderSize: "16k", + HTTP2MaxRequests: 1000, + HTTPRedirectCode: 308, + HSTS: true, + HSTSIncludeSubdomains: true, + HSTSMaxAge: hstsMaxAge, + HSTSPreload: false, + IgnoreInvalidHeaders: true, + GzipLevel: 5, + GzipTypes: gzipTypes, + KeepAlive: 75, + KeepAliveRequests: 100, + LargeClientHeaderBuffers: "4 8k", + LogFormatEscapeJSON: false, + LogFormatStream: logFormatStream, + LogFormatUpstream: logFormatUpstream, + EnableMultiAccept: true, + MaxWorkerConnections: 16384, + MaxWorkerOpenFiles: 0, + MapHashBucketSize: 64, + NginxStatusIpv4Whitelist: defNginxStatusIpv4Whitelist, + NginxStatusIpv6Whitelist: defNginxStatusIpv6Whitelist, + ProxyRealIPCIDR: defIPCIDR, + ProxyProtocolHeaderTimeout: defProxyDeadlineDuration, + ServerNameHashMaxSize: 1024, + ProxyHeadersHashMaxSize: 512, + ProxyHeadersHashBucketSize: 64, + ProxyStreamResponses: 1, + ReusePort: true, + ShowServerTokens: true, + SSLBufferSize: sslBufferSize, + SSLCiphers: sslCiphers, + SSLECDHCurve: "auto", + SSLProtocols: sslProtocols, + SSLSessionCache: true, + SSLSessionCacheSize: sslSessionCacheSize, + SSLSessionTickets: true, + SSLSessionTimeout: sslSessionTimeout, + EnableBrotli: false, + UseGzip: true, + UseGeoIP: true, + UseGeoIP2: false, + WorkerProcesses: strconv.Itoa(runtime.NumCPU()), + WorkerShutdownTimeout: "10s", + VariablesHashBucketSize: 128, + VariablesHashMaxSize: 2048, + UseHTTP2: true, + ProxyStreamTimeout: "600s", Backend: defaults.Backend{ - ProxyBodySize: bodySize, - ProxyConnectTimeout: 5, - ProxyReadTimeout: 60, - ProxySendTimeout: 60, - ProxyBufferSize: "4k", - ProxyCookieDomain: "off", - ProxyCookiePath: "off", - ProxyNextUpstream: "error timeout invalid_header http_502 http_503 http_504", - ProxyRequestBuffering: "on", - ProxyRedirectFrom: "off", - SSLRedirect: true, - CustomHTTPErrors: []int{}, - WhitelistSourceRange: []string{}, - SkipAccessLogURLs: []string{}, - LimitRate: 0, - LimitRateAfter: 0, + ProxyBodySize: bodySize, + ProxyConnectTimeout: 5, + ProxyReadTimeout: 60, + ProxySendTimeout: 60, + ProxyBuffersNumber: 4, 
+ ProxyBufferSize: "4k", + ProxyCookieDomain: "off", + ProxyCookiePath: "off", + ProxyNextUpstream: "error timeout", + ProxyNextUpstreamTimeout: 0, + ProxyNextUpstreamTries: 3, + ProxyRequestBuffering: "on", + ProxyRedirectFrom: "off", + ProxyRedirectTo: "off", + SSLRedirect: true, + CustomHTTPErrors: []int{}, + WhitelistSourceRange: []string{}, + SkipAccessLogURLs: []string{}, + LimitRate: 0, + LimitRateAfter: 0, + ProxyBuffering: "off", }, UpstreamKeepaliveConnections: 32, + UpstreamKeepaliveTimeout: 60, + UpstreamKeepaliveRequests: 100, LimitConnZoneVariable: defaultLimitConnZoneVariable, BindAddressIpv4: defBindAddress, BindAddressIpv6: defBindAddress, ZipkinCollectorPort: 9411, ZipkinServiceName: "nginx", + ZipkinSampleRate: 1.0, + JaegerCollectorPort: 6831, + JaegerServiceName: "nginx", + JaegerSamplerType: "const", + JaegerSamplerParam: "1", + JaegerSamplerPort: 5778, + JaegerSamplerHost: "http://127.0.0.1", + DatadogServiceName: "nginx", + DatadogCollectorPort: 8126, + DatadogOperationNameOverride: "nginx.handle", + LimitReqStatusCode: 503, + LimitConnStatusCode: 503, + SyslogPort: 514, + NoTLSRedirectLocations: "/.well-known/acme-challenge", + NoAuthLocations: "/.well-known/acme-challenge", + GlobalExternalAuth: defGlobalExternalAuth, } - if glog.V(5) { + if klog.V(5) { cfg.ErrorLogLevel = "debug" } @@ -546,23 +755,30 @@ func (cfg Configuration) BuildLogFormatUpstream() string { // TemplateConfig contains the nginx configuration to render the file nginx.conf type TemplateConfig struct { - ProxySetHeaders map[string]string - AddHeaders map[string]string - MaxOpenFiles int - BacklogSize int - Backends []*ingress.Backend - PassthroughBackends []*ingress.SSLPassthroughBackend - Servers []*ingress.Server - TCPBackends []ingress.L4Service - UDPBackends []ingress.L4Service - HealthzURI string - CustomErrors bool - Cfg Configuration - IsIPV6Enabled bool - IsSSLPassthroughEnabled bool - RedirectServers map[string]string - ListenPorts *ListenPorts - PublishService *apiv1.Service + ProxySetHeaders map[string]string + AddHeaders map[string]string + BacklogSize int + Backends []*ingress.Backend + PassthroughBackends []*ingress.SSLPassthroughBackend + Servers []*ingress.Server + TCPBackends []ingress.L4Service + UDPBackends []ingress.L4Service + HealthzURI string + Cfg Configuration + IsIPV6Enabled bool + IsSSLPassthroughEnabled bool + NginxStatusIpv4Whitelist []string + NginxStatusIpv6Whitelist []string + RedirectServers interface{} + ListenPorts *ListenPorts + PublishService *apiv1.Service + DynamicCertificatesEnabled bool + EnableMetrics bool + + PID string + StatusSocket string + StatusPath string + StreamSocket string } // ListenPorts describe the ports required to run the @@ -570,8 +786,20 @@ type TemplateConfig struct { type ListenPorts struct { HTTP int HTTPS int - Status int Health int Default int SSLProxy int } + +// GlobalExternalAuth describe external authentication configuration for the +// NGINX Ingress controller +type GlobalExternalAuth struct { + URL string `json:"url"` + // Host contains the hostname defined in the URL + Host string `json:"host"` + SigninURL string `json:"signinUrl"` + Method string `json:"method"` + ResponseHeaders []string `json:"responseHeaders,omitempty"` + RequestRedirect string `json:"requestRedirect"` + AuthSnippet string `json:"authSnippet"` +} diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 1e0fb2d996..51c1ff2dcb 100644 --- a/internal/ingress/controller/controller.go +++ 
b/internal/ingress/controller/controller.go @@ -18,35 +18,26 @@ package controller import ( "fmt" - "math/rand" - "net" - "reflect" "sort" "strconv" "strings" - "sync/atomic" "time" - "github.com/golang/glog" - + "github.com/mitchellh/hashstructure" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/conversion" + networking "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress/annotations" "k8s.io/ingress-nginx/internal/ingress/annotations/class" - "k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck" - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/annotations/log" "k8s.io/ingress-nginx/internal/ingress/annotations/proxy" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" - "k8s.io/ingress-nginx/internal/ingress/defaults" - "k8s.io/ingress-nginx/internal/ingress/resolver" "k8s.io/ingress-nginx/internal/k8s" - "k8s.io/ingress-nginx/internal/task" + "k8s.io/klog" ) const ( @@ -55,15 +46,6 @@ const ( rootLocation = "/" ) -var ( - cloner *conversion.Cloner -) - -func init() { - cloner := conversion.NewCloner() - cloner.RegisterDeepCopyFunc(ingress.GetGeneratedDeepCopyFuncs) -} - // Configuration contains all the settings required by an Ingress controller type Configuration struct { APIServerHost string @@ -77,46 +59,51 @@ type Configuration struct { Namespace string - ForceNamespaceIsolation bool - - // optional + // +optional TCPConfigMapName string - // optional + // +optional UDPConfigMapName string - DefaultHealthzURL string DefaultSSLCertificate string - // optional - PublishService string + // +optional + PublishService string + PublishStatusAddress string UpdateStatus bool UseNodeInternalIP bool ElectionID string UpdateStatusOnShutdown bool - SortBackends bool - ListenPorts *ngx_config.ListenPorts EnableSSLPassthrough bool EnableProfiling bool + EnableMetrics bool + MetricsPerHost bool + EnableSSLChainCompletion bool - FakeCertificatePath string - FakeCertificateSHA string -} + FakeCertificate *ingress.SSLCert -// GetDefaultBackend returns the default backend -func (n NGINXController) GetDefaultBackend() defaults.Backend { - return n.backendDefaults + SyncRateLimit float32 + + DynamicCertificatesEnabled bool + + DisableCatchAll bool + + ValidationWebhook string + ValidationWebhookCertPath string + ValidationWebhookKeyPath string + + GlobalExternalAuth *ngx_config.GlobalExternalAuth } -// GetPublishService returns the configured service used to set ingress status +// GetPublishService returns the Service used to set the load-balancer status of Ingresses. 
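The syncIngress logic below reloads the backend only when the configuration hash changes. A minimal sketch of that checksum trick with the same hashstructure call the diff introduces (canonical import path github.com/mitchellh/hashstructure; the config struct here is a stand-in for ingress.Configuration):

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// A stand-in for the real configuration type; json tags drive the hash
// via the TagName option, as in the controller.
type config struct {
	Servers []string `json:"servers"`
}

func main() {
	opts := &hashstructure.HashOptions{TagName: "json"}

	before, _ := hashstructure.Hash(config{Servers: []string{"a"}}, opts)
	after, _ := hashstructure.Hash(config{Servers: []string{"a", "b"}}, opts)

	// A different hash means the configuration changed and a reload is due.
	fmt.Println(before != after) // true
}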
func (n NGINXController) GetPublishService() *apiv1.Service { - s, err := n.listers.Service.GetByName(n.cfg.PublishService) + s, err := n.store.GetService(n.cfg.PublishService) if err != nil { return nil } @@ -124,165 +111,193 @@ func (n NGINXController) GetPublishService() *apiv1.Service { return s } -// GetSecret searches for a secret in the local secrets Store -func (n NGINXController) GetSecret(name string) (*apiv1.Secret, error) { - return n.listers.Secret.GetByName(name) -} - -// GetService searches for a service in the local secrets Store -func (n NGINXController) GetService(name string) (*apiv1.Service, error) { - return n.listers.Service.GetByName(name) -} - -// sync collects all the pieces required to assemble the configuration file and -// then sends the content to the backend (OnUpdate) receiving the populated -// template as response reloading the backend if is required. -func (n *NGINXController) syncIngress(item interface{}) error { +// syncIngress collects all the pieces required to assemble the NGINX +// configuration file and passes the resulting data structures to the backend +// (OnUpdate) when a reload is deemed necessary. +func (n *NGINXController) syncIngress(interface{}) error { n.syncRateLimiter.Accept() if n.syncQueue.IsShuttingDown() { return nil } - if element, ok := item.(task.Element); ok { - if name, ok := element.Key.(string); ok { - if obj, exists, _ := n.listers.Ingress.GetByKey(name); exists { - ing := obj.(*extensions.Ingress) - n.readSecrets(ing) - } - } + ings := n.store.ListIngresses(nil) + hosts, servers, pcfg := n.getConfiguration(ings) + + if n.isLeader() { + klog.V(2).Infof("Updating ssl expiration metrics.") + n.metricCollector.SetSSLExpireTime(servers) } - // Sort ingress rules using the ResourceVersion field - ings := n.listers.Ingress.List() - sort.SliceStable(ings, func(i, j int) bool { - ir := ings[i].(*extensions.Ingress).ResourceVersion - jr := ings[j].(*extensions.Ingress).ResourceVersion - return ir < jr - }) + if n.runningConfig.Equal(pcfg) { + klog.V(3).Infof("No configuration change detected, skipping backend reload.") + return nil + } - // filter ingress rules - var ingresses []*extensions.Ingress - for _, ingIf := range ings { - ing := ingIf.(*extensions.Ingress) - if !class.IsValid(ing) { - continue + n.metricCollector.SetHosts(hosts) + + if !n.IsDynamicConfigurationEnough(pcfg) { + klog.Infof("Configuration changes detected, backend reload required.") + + hash, _ := hashstructure.Hash(pcfg, &hashstructure.HashOptions{ + TagName: "json", + }) + + pcfg.ConfigurationChecksum = fmt.Sprintf("%v", hash) + + err := n.OnUpdate(*pcfg) + if err != nil { + n.metricCollector.IncReloadErrorCount() + n.metricCollector.ConfigSuccess(hash, false) + klog.Errorf("Unexpected failure reloading the backend:\n%v", err) + return err } - ingresses = append(ingresses, ing) + klog.Infof("Backend successfully reloaded.") + n.metricCollector.ConfigSuccess(hash, true) + n.metricCollector.IncReloadCount() } - upstreams, servers := n.getBackendServers(ingresses) - var passUpstreams []*ingress.SSLPassthroughBackend + isFirstSync := n.runningConfig.Equal(&ingress.Configuration{}) + if isFirstSync { + // For the initial sync it always takes some time for NGINX to start listening + // For large configurations it might take a while so we loop and back off + klog.Info("Initial sync, sleeping for 1 second.") + time.Sleep(1 * time.Second) + } - for _, server := range servers { - if !server.SSLPassthrough { - continue - } + retry := wait.Backoff{ + Steps: 15, + 
Duration: 1 * time.Second, + Factor: 0.8, + Jitter: 0.1, + } - for _, loc := range server.Locations { - if loc.Path != rootLocation { - glog.Warningf("ignoring path %v of ssl passthrough host %v", loc.Path, server.Hostname) - continue - } - passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ - Backend: loc.Backend, - Hostname: server.Hostname, - Service: loc.Service, - Port: loc.Port, - }) - break + err := wait.ExponentialBackoff(retry, func() (bool, error) { + err := configureDynamically(pcfg, n.cfg.DynamicCertificatesEnabled) + if err == nil { + klog.V(2).Infof("Dynamic reconfiguration succeeded.") + return true, nil } + + klog.Warningf("Dynamic reconfiguration failed: %v", err) + return false, err + }) + if err != nil { + klog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err) + return err } - pcfg := ingress.Configuration{ - Backends: upstreams, - Servers: servers, - TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP), - UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP), - PassthroughBackends: passUpstreams, + ri := getRemovedIngresses(n.runningConfig, pcfg) + re := getRemovedHosts(n.runningConfig, pcfg) + n.metricCollector.RemoveMetrics(ri, re) + + n.runningConfig = pcfg + + return nil +} + +// CheckIngress returns an error in case the provided ingress, when added +// to the current configuration, generates an invalid configuration +func (n *NGINXController) CheckIngress(ing *networking.Ingress) error { + //TODO: this is wrong + if n == nil { + return fmt.Errorf("cannot check ingress on a nil ingress controller") } - if !n.isForceReload() && n.runningConfig.Equal(&pcfg) { - glog.V(3).Infof("skipping backend reload (no changes detected)") + if ing == nil { + // no ingress to add, no state change return nil } - glog.Infof("backend reload required") + if !class.IsValid(ing) { + klog.Infof("ignoring ingress %v in %v based on annotation %v", ing.Name, ing.ObjectMeta.Namespace, class.IngressKey) + return nil + } + + if n.cfg.Namespace != "" && ing.ObjectMeta.Namespace != n.cfg.Namespace { + klog.Infof("ignoring ingress %v in namespace %v different from the namespace watched %s", ing.Name, ing.ObjectMeta.Namespace, n.cfg.Namespace) + return nil + } + + filter := func(toCheck *ingress.Ingress) bool { + return toCheck.ObjectMeta.Namespace == ing.ObjectMeta.Namespace && + toCheck.ObjectMeta.Name == ing.ObjectMeta.Name + } + + ings := n.store.ListIngresses(filter) + ings = append(ings, &ingress.Ingress{ + Ingress: *ing, + ParsedAnnotations: annotations.NewAnnotationExtractor(n.store).Extract(ing), + }) + + _, _, pcfg := n.getConfiguration(ings) + + cfg := n.store.GetBackendConfiguration() + cfg.Resolver = n.resolver - err := n.OnUpdate(pcfg) + content, err := n.generateTemplate(cfg, *pcfg) if err != nil { - incReloadErrorCount() - glog.Errorf("unexpected failure restarting the backend: \n%v", err) + n.metricCollector.IncCheckErrorCount(ing.ObjectMeta.Namespace, ing.Name) return err } - glog.Infof("ingress backend successfully reloaded...") - incReloadCount() - setSSLExpireTime(servers) - - n.runningConfig = &pcfg - n.SetForceReload(false) + err = n.testTemplate(content) + if err != nil { + n.metricCollector.IncCheckErrorCount(ing.ObjectMeta.Namespace, ing.Name) + } else { + n.metricCollector.IncCheckCount(ing.ObjectMeta.Namespace, ing.Name) + } - return nil + return err } func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service { - glog.V(3).Infof("obtaining 
information about stream services of type %v located in configmap %v", proto, configmapName) if configmapName == "" { - // no configmap configured return []ingress.L4Service{} } - + klog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName) _, _, err := k8s.ParseNameNS(configmapName) if err != nil { - glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) + klog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err) return []ingress.L4Service{} } - - configmap, err := n.listers.ConfigMap.GetByName(configmapName) + configmap, err := n.store.GetConfigMap(configmapName) if err != nil { - glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) + klog.Errorf("Error getting ConfigMap %q: %v", configmapName, err) return []ingress.L4Service{} } - var svcs []ingress.L4Service var svcProxyProtocol ingress.ProxyProtocol - // k -> port to expose - // v -> /: for k, v := range configmap.Data { - externalPort, err := strconv.Atoi(k) + rp := []int{ + n.cfg.ListenPorts.HTTP, + n.cfg.ListenPorts.HTTPS, + n.cfg.ListenPorts.SSLProxy, + n.cfg.ListenPorts.Health, + n.cfg.ListenPorts.Default, + } + reservedPorts := sets.NewInt(rp...) + // svcRef format: <(str)namespace>/<(str)service>:<(intstr)port>[:<("PROXY")decode>:<("PROXY")encode>] + for port, svcRef := range configmap.Data { + externalPort, err := strconv.Atoi(port) if err != nil { - glog.Warningf("%v is not valid as a TCP/UDP port", k) + klog.Warningf("%q is not a valid %v port number", port, proto) continue } - - rp := []int{ - n.cfg.ListenPorts.HTTP, - n.cfg.ListenPorts.HTTPS, - n.cfg.ListenPorts.SSLProxy, - n.cfg.ListenPorts.Status, - n.cfg.ListenPorts.Health, - n.cfg.ListenPorts.Default, - } - - if intInSlice(externalPort, rp) { - glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for the Ingress controller", k) + if reservedPorts.Has(externalPort) { + klog.Warningf("Port %d cannot be used for %v stream services.
It is reserved for the Ingress controller.", externalPort, proto) continue } - - nsSvcPort := strings.Split(v, ":") + nsSvcPort := strings.Split(svcRef, ":") if len(nsSvcPort) < 2 { - glog.Warningf("invalid format (namespace/name:port:[PROXY]:[PROXY]) '%v'", k) + klog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort) continue } - nsName := nsSvcPort[0] svcPort := nsSvcPort[1] svcProxyProtocol.Decode = false svcProxyProtocol.Encode = false - - // Proxy protocol is possible if the service is TCP + // Proxy Protocol is only compatible with TCP Services if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP { if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" { svcProxyProtocol.Decode = true @@ -291,58 +306,46 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr svcProxyProtocol.Encode = true } } - svcNs, svcName, err := k8s.ParseNameNS(nsName) if err != nil { - glog.Warningf("%v", err) + klog.Warningf("%v", err) continue } - - svcObj, svcExists, err := n.listers.Service.GetByKey(nsName) + svc, err := n.store.GetService(nsName) if err != nil { - glog.Warningf("error getting service %v: %v", nsName, err) - continue - } - - if !svcExists { - glog.Warningf("service %v was not found", nsName) + klog.Warningf("Error getting Service %q: %v", nsName, err) continue } - - svc := svcObj.(*apiv1.Service) - var endps []ingress.Endpoint targetPort, err := strconv.Atoi(svcPort) if err != nil { - glog.V(3).Infof("searching service %v endpoints using the name '%v'", svcNs, svcName, svcPort) + // not a port number, fall back to using port name + klog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName) for _, sp := range svc.Spec.Ports { if sp.Name == svcPort { if sp.Protocol == proto { - endps = n.getEndpoints(svc, &sp, proto, &healthcheck.Config{}) + endps = getEndpoints(svc, &sp, proto, n.store.GetServiceEndpoints) break } } } } else { - // we need to use the TargetPort (where the endpoints are running) - glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort) + klog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName) for _, sp := range svc.Spec.Ports { if sp.Port == int32(targetPort) { if sp.Protocol == proto { - endps = n.getEndpoints(svc, &sp, proto, &healthcheck.Config{}) + endps = getEndpoints(svc, &sp, proto, n.store.GetServiceEndpoints) break } } } } - - // stream services cannot contain empty upstreams and there is no - // default backend equivalent + // stream services cannot contain empty upstreams and there is + // no default backend equivalent if len(endps) == 0 { - glog.Warningf("service %v/%v does not have any active endpoints for port %v and protocol %v", svcNs, svcName, svcPort, proto) + klog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort) continue } - svcs = append(svcs, ingress.L4Service{ Port: externalPort, Backend: ingress.L4Backend{ @@ -353,37 +356,39 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr ProxyProtocol: svcProxyProtocol, }, Endpoints: endps, + Service: svc, }) } - + // Keep upstream order sorted to reduce unnecessary nginx config reloads. + sort.SliceStable(svcs, func(i, j int) bool { + return svcs[i].Port < svcs[j].Port + }) return svcs } -// getDefaultUpstream returns an upstream associated with the -// default backend service. 
In case of error retrieving information -// configure the upstream to return http code 503. +// getDefaultUpstream returns the upstream associated with the default backend. +// Configures the upstream to return HTTP code 503 in case of error. func (n *NGINXController) getDefaultUpstream() *ingress.Backend { upstream := &ingress.Backend{ Name: defUpstreamName, } svcKey := n.cfg.DefaultService - svcObj, svcExists, err := n.listers.Service.GetByKey(svcKey) - if err != nil { - glog.Warningf("unexpected error searching the default backend %v: %v", n.cfg.DefaultService, err) + + if len(svcKey) == 0 { upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) return upstream } - if !svcExists { - glog.Warningf("service %v does not exist", svcKey) + svc, err := n.store.GetService(svcKey) + if err != nil { + klog.Warningf("Error getting default backend %q: %v", svcKey, err) upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) return upstream } - svc := svcObj.(*apiv1.Service) - endps := n.getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, &healthcheck.Config{}) + endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + klog.Warningf("Service %q does not have any active Endpoint", svcKey) endps = []ingress.Endpoint{n.DefaultEndpoint()} } @@ -392,21 +397,71 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend { return upstream } -// getBackendServers returns a list of Upstream and Server to be used by the backend -// An upstream can be used in multiple servers if the namespace, service name and port are the same -func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) { +// getConfiguration returns the configuration matching the standard kubernetes ingress +func (n *NGINXController) getConfiguration(ingresses []*ingress.Ingress) (sets.String, []*ingress.Server, *ingress.Configuration) { + upstreams, servers := n.getBackendServers(ingresses) + var passUpstreams []*ingress.SSLPassthroughBackend + + hosts := sets.NewString() + + for _, server := range servers { + if !hosts.Has(server.Hostname) { + hosts.Insert(server.Hostname) + } + if server.Alias != "" && !hosts.Has(server.Alias) { + hosts.Insert(server.Alias) + } + + if !server.SSLPassthrough { + continue + } + + for _, loc := range server.Locations { + if loc.Path != rootLocation { + klog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname) + continue + } + passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ + Backend: loc.Backend, + Hostname: server.Hostname, + Service: loc.Service, + Port: loc.Port, + }) + break + } + } + + return hosts, servers, &ingress.Configuration{ + Backends: upstreams, + Servers: servers, + TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP), + UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP), + PassthroughBackends: passUpstreams, + BackendConfigChecksum: n.store.GetBackendConfiguration().Checksum, + ControllerPodsCount: n.store.GetRunningControllerPodsCount(), + } +} + +// getBackendServers returns a list of Upstream and Server to be used by the +// backend. An upstream can be used in multiple servers if the namespace, +// service name and port are the same. 
+func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*ingress.Backend, []*ingress.Server) { du := n.getDefaultUpstream() upstreams := n.createUpstreams(ingresses, du) servers := n.createServers(ingresses, upstreams, du) + var canaryIngresses []*ingress.Ingress + for _, ing := range ingresses { - anns := n.getIngressAnnotations(ing) + ingKey := k8s.MetaNamespaceKey(ing) + anns := ing.ParsedAnnotations for _, rule := range ing.Spec.Rules { host := rule.Host if host == "" { host = defServerName } + server := servers[host] if server == nil { server = servers[defServerName] @@ -414,30 +469,40 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] if rule.HTTP == nil && host != defServerName { - glog.V(3).Infof("ingress rule %v/%v does not contain HTTP rules, using default backend", ing.Namespace, ing.Name) + klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) continue } + if server.AuthTLSError == "" && anns.CertificateAuth.AuthTLSError != "" { + server.AuthTLSError = anns.CertificateAuth.AuthTLSError + } + if server.CertificateAuth.CAFileName == "" { server.CertificateAuth = anns.CertificateAuth - // It is possible that no CAFileName is found in the secret - if server.CertificateAuth.CAFileName == "" { - glog.V(3).Infof("secret %v does not contain 'ca.crt', mutual authentication not enabled - ingress rule %v/%v.", server.CertificateAuth.Secret, ing.Namespace, ing.Name) - + if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" { + klog.V(3).Infof("Secret %q has no 'ca.crt' key, mutual authentication disabled for Ingress %q", + server.CertificateAuth.Secret, ingKey) } } else { - glog.V(3).Infof("server %v already contains a mutual authentication configuration - ingress rule %v/%v", server.Hostname, ing.Namespace, ing.Name) + klog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)", + server.Hostname, ingKey) + } + + if rule.HTTP == nil { + klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) + continue } for _, path := range rule.HTTP.Paths { - upsName := fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), - path.Backend.ServiceName, - path.Backend.ServicePort.String()) + upsName := upstreamName(ing.Namespace, path.Backend.ServiceName, path.Backend.ServicePort) ups := upstreams[upsName] - // if there's no path defined we assume / + // Backend is not referenced to by a server + if ups.NoServer { + continue + } + nginxPath := rootLocation if path.Path != "" { nginxPath = path.Path @@ -449,30 +514,20 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] addLoc = false if !loc.IsDefBackend { - glog.V(3).Infof("avoiding replacement of ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) + klog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)", + loc.Path, server.Hostname, loc.Backend, ingKey) break } - glog.V(3).Infof("replacing ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) + klog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)", + loc.Path, server.Hostname, loc.Backend, ups.Name, ingKey) + loc.Backend = ups.Name loc.IsDefBackend = false - loc.Backend = ups.Name loc.Port = ups.Port loc.Service = ups.Service loc.Ingress = ing - loc.BasicDigestAuth = anns.BasicDigestAuth - 
loc.ClientBodyBufferSize = anns.ClientBodyBufferSize - loc.ConfigurationSnippet = anns.ConfigurationSnippet - loc.CorsConfig = anns.CorsConfig - loc.ExternalAuth = anns.ExternalAuth - loc.Proxy = anns.Proxy - loc.RateLimit = anns.RateLimit - loc.Redirect = anns.Redirect - loc.Rewrite = anns.Rewrite - loc.UpstreamVhost = anns.UpstreamVhost - loc.VtsFilterKey = anns.VtsFilterKey - loc.Whitelist = anns.Whitelist - loc.Denied = anns.Denied + locationApplyAnnotations(loc, anns) if loc.Redirect.FromToWWW { server.RedirectFromToWWW = true @@ -480,30 +535,21 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] break } } - // is a new location + + // new location if addLoc { - glog.V(3).Infof("adding location %v in ingress rule %v/%v upstream %v", nginxPath, ing.Namespace, ing.Name, ups.Name) + klog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)", + nginxPath, server.Hostname, ups.Name, ingKey) + loc := &ingress.Location{ - Path: nginxPath, - Backend: ups.Name, - IsDefBackend: false, - Service: ups.Service, - Port: ups.Port, - Ingress: ing, - BasicDigestAuth: anns.BasicDigestAuth, - ClientBodyBufferSize: anns.ClientBodyBufferSize, - ConfigurationSnippet: anns.ConfigurationSnippet, - CorsConfig: anns.CorsConfig, - ExternalAuth: anns.ExternalAuth, - Proxy: anns.Proxy, - RateLimit: anns.RateLimit, - Redirect: anns.Redirect, - Rewrite: anns.Rewrite, - UpstreamVhost: anns.UpstreamVhost, - VtsFilterKey: anns.VtsFilterKey, - Whitelist: anns.Whitelist, - Denied: anns.Denied, + Path: nginxPath, + Backend: ups.Name, + IsDefBackend: false, + Service: ups.Service, + Port: ups.Port, + Ingress: ing, } + locationApplyAnnotations(loc, anns) if loc.Redirect.FromToWWW { server.RedirectFromToWWW = true @@ -516,64 +562,79 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] } if anns.SessionAffinity.Type == "cookie" { + cookiePath := anns.SessionAffinity.Cookie.Path + if anns.Rewrite.UseRegex && cookiePath == "" { + klog.Warningf("session-cookie-path should be set when use-regex is true") + } + ups.SessionAffinity.CookieSessionAffinity.Name = anns.SessionAffinity.Cookie.Name - ups.SessionAffinity.CookieSessionAffinity.Hash = anns.SessionAffinity.Cookie.Hash + ups.SessionAffinity.CookieSessionAffinity.Expires = anns.SessionAffinity.Cookie.Expires + ups.SessionAffinity.CookieSessionAffinity.MaxAge = anns.SessionAffinity.Cookie.MaxAge + ups.SessionAffinity.CookieSessionAffinity.Path = cookiePath + ups.SessionAffinity.CookieSessionAffinity.ChangeOnFailure = anns.SessionAffinity.Cookie.ChangeOnFailure locs := ups.SessionAffinity.CookieSessionAffinity.Locations if _, ok := locs[host]; !ok { locs[host] = []string{} } - locs[host] = append(locs[host], path.Path) } } } + + // set aside canary ingresses to merge later + if anns.Canary.Enabled { + canaryIngresses = append(canaryIngresses, ing) + } + } + + if nonCanaryIngressExists(ingresses, canaryIngresses) { + for _, canaryIng := range canaryIngresses { + mergeAlternativeBackends(canaryIng, upstreams, servers) + } } aUpstreams := make([]*ingress.Backend, 0, len(upstreams)) for _, upstream := range upstreams { + aUpstreams = append(aUpstreams, upstream) + isHTTPSfrom := []*ingress.Server{} for _, server := range servers { for _, location := range server.Locations { - if upstream.Name == location.Backend { - if len(upstream.Endpoints) == 0 { - glog.V(3).Infof("upstream %v does not have any active endpoints.", upstream.Name) - location.Backend = "" - - // check if the location contains 
endpoints and a custom default backend - if location.DefaultBackend != nil { - sp := location.DefaultBackend.Spec.Ports[0] - endps := n.getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, &healthcheck.Config{}) - if len(endps) > 0 { - glog.V(3).Infof("using custom default backend in server %v location %v (service %v/%v)", - server.Hostname, location.Path, location.DefaultBackend.Namespace, location.DefaultBackend.Name) - b, err := cloner.DeepCopy(upstream) - if err != nil { - glog.Errorf("unexpected error copying Upstream: %v", err) - } else { - name := fmt.Sprintf("custom-default-backend-%v", upstream.Name) - nb := b.(*ingress.Backend) - nb.Name = name - nb.Endpoints = endps - aUpstreams = append(aUpstreams, nb) - location.Backend = name - } - } + if shouldCreateUpstreamForLocationDefaultBackend(upstream, location) { + sp := location.DefaultBackend.Spec.Ports[0] + endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) + if len(endps) > 0 { + + name := fmt.Sprintf("custom-default-backend-%v", location.DefaultBackend.GetName()) + klog.V(3).Infof("Creating \"%v\" upstream based on default backend annotation", name) + + nb := upstream.DeepCopy() + nb.Name = name + nb.Endpoints = endps + aUpstreams = append(aUpstreams, nb) + location.DefaultBackendUpstreamName = name + + if len(upstream.Endpoints) == 0 { + klog.V(3).Infof("Upstream %q has no active Endpoint, so using custom default backend for location %q in server %q (Service \"%v/%v\")", + upstream.Name, location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) + + location.Backend = name } } - // Configure Backends[].SSLPassthrough if server.SSLPassthrough { if location.Path == rootLocation { if location.Backend == defUpstreamName { - glog.Warningf("ignoring ssl passthrough of %v as it doesn't have a default backend (root context)", server.Hostname) + klog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname) continue } - isHTTPSfrom = append(isHTTPSfrom, server) } } + } else { + location.DefaultBackendUpstreamName = "upstream-default-backend" } } } @@ -583,28 +644,22 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] } } - // create the list of upstreams and skip those without endpoints - for _, upstream := range upstreams { - if len(upstream.Endpoints) == 0 { - continue - } - aUpstreams = append(aUpstreams, upstream) - } - - if n.cfg.SortBackends { - sort.SliceStable(aUpstreams, func(a, b int) bool { - return aUpstreams[a].Name < aUpstreams[b].Name - }) - } - aServers := make([]*ingress.Server, 0, len(servers)) for _, value := range servers { sort.SliceStable(value.Locations, func(i, j int) bool { return value.Locations[i].Path > value.Locations[j].Path }) + + sort.SliceStable(value.Locations, func(i, j int) bool { + return len(value.Locations[i].Path) > len(value.Locations[j].Path) + }) aServers = append(aServers, value) } + sort.SliceStable(aUpstreams, func(a, b int) bool { + return aUpstreams[a].Name < aUpstreams[b].Name + }) + sort.SliceStable(aServers, func(i, j int) bool { return aServers[i].Hostname < aServers[j].Hostname }) @@ -612,78 +667,69 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] return aUpstreams, aServers } -// GetAuthCertificate is used by the auth-tls annotations to get a cert from a secret -func (n NGINXController) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) { - if _, exists := 
n.sslCertTracker.Get(name); !exists { - n.syncSecret(name) - } - - _, err := n.listers.Secret.GetByName(name) - if err != nil { - return &resolver.AuthSSLCert{}, fmt.Errorf("unexpected error: %v", err) - } - - bc, exists := n.sslCertTracker.Get(name) - if !exists { - return &resolver.AuthSSLCert{}, fmt.Errorf("secret %v does not exist", name) - } - cert := bc.(*ingress.SSLCert) - return &resolver.AuthSSLCert{ - Secret: name, - CAFileName: cert.CAFileName, - PemSHA: cert.PemSHA, - }, nil -} - -// createUpstreams creates the NGINX upstreams for each service referenced in -// Ingress rules. The servers inside the upstream are endpoints. -func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend { +// createUpstreams creates the NGINX upstreams (Endpoints) for each Service +// referenced in Ingress rules. +func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.Backend) map[string]*ingress.Backend { upstreams := make(map[string]*ingress.Backend) upstreams[defUpstreamName] = du for _, ing := range data { - anns := n.getIngressAnnotations(ing) + anns := ing.ParsedAnnotations var defBackend string if ing.Spec.Backend != nil { - defBackend = fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), - ing.Spec.Backend.ServiceName, - ing.Spec.Backend.ServicePort.String()) + defBackend = upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort) - glog.V(3).Infof("creating upstream %v", defBackend) + klog.V(3).Infof("Creating upstream %q", defBackend) upstreams[defBackend] = newUpstream(defBackend) - if !upstreams[defBackend].Secure { - upstreams[defBackend].Secure = anns.SecureUpstream.Secure - } - if upstreams[defBackend].SecureCACert.Secret == "" { - upstreams[defBackend].SecureCACert = anns.SecureUpstream.CACert - } - if upstreams[defBackend].UpstreamHashBy == "" { - upstreams[defBackend].UpstreamHashBy = anns.UpstreamHashBy + + upstreams[defBackend].SecureCACert = anns.SecureUpstream.CACert + + upstreams[defBackend].UpstreamHashBy.UpstreamHashBy = anns.UpstreamHashBy.UpstreamHashBy + upstreams[defBackend].UpstreamHashBy.UpstreamHashBySubset = anns.UpstreamHashBy.UpstreamHashBySubset + upstreams[defBackend].UpstreamHashBy.UpstreamHashBySubsetSize = anns.UpstreamHashBy.UpstreamHashBySubsetSize + + upstreams[defBackend].LoadBalancing = anns.LoadBalancing + if upstreams[defBackend].LoadBalancing == "" { + upstreams[defBackend].LoadBalancing = n.store.GetBackendConfiguration().LoadBalancing } - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName) + svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.Backend.ServiceName) - // Add the service cluster endpoint as the upstream instead of individual endpoints - // if the serviceUpstream annotation is enabled + // add the service ClusterIP as a single Endpoint instead of individual Endpoints if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend) if err != nil { - glog.Errorf("Failed to get service cluster endpoint for service %s: %v", svcKey, err) + klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint} } } + // configure traffic shaping for canary + if anns.Canary.Enabled { + upstreams[defBackend].NoServer = true + upstreams[defBackend].TrafficShapingPolicy = ingress.TrafficShapingPolicy{ + Weight: anns.Canary.Weight, + Header: anns.Canary.Header, + HeaderValue: 
anns.Canary.HeaderValue, + Cookie: anns.Canary.Cookie, + } + } + if len(upstreams[defBackend].Endpoints) == 0 { - endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), &anns.HealthCheck) + endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String()) upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) if err != nil { - glog.Warningf("error creating upstream %v: %v", defBackend, err) + klog.Warningf("Error creating upstream %q: %v", defBackend, err) } } + s, err := n.store.GetService(svcKey) + if err != nil { + klog.Warningf("Error obtaining Service %q: %v", svcKey, err) + } + upstreams[defBackend].Service = s } for _, rule := range ing.Spec.Rules { @@ -692,56 +738,62 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres } for _, path := range rule.HTTP.Paths { - name := fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), - path.Backend.ServiceName, - path.Backend.ServicePort.String()) + name := upstreamName(ing.Namespace, path.Backend.ServiceName, path.Backend.ServicePort) if _, ok := upstreams[name]; ok { continue } - glog.V(3).Infof("creating upstream %v", name) + klog.V(3).Infof("Creating upstream %q", name) upstreams[name] = newUpstream(name) upstreams[name].Port = path.Backend.ServicePort - if !upstreams[name].Secure { - upstreams[name].Secure = anns.SecureUpstream.Secure - } + upstreams[name].SecureCACert = anns.SecureUpstream.CACert - if upstreams[name].SecureCACert.Secret == "" { - upstreams[name].SecureCACert = anns.SecureUpstream.CACert - } + upstreams[name].UpstreamHashBy.UpstreamHashBy = anns.UpstreamHashBy.UpstreamHashBy + upstreams[name].UpstreamHashBy.UpstreamHashBySubset = anns.UpstreamHashBy.UpstreamHashBySubset + upstreams[name].UpstreamHashBy.UpstreamHashBySubsetSize = anns.UpstreamHashBy.UpstreamHashBySubsetSize - if upstreams[name].UpstreamHashBy == "" { - upstreams[name].UpstreamHashBy = anns.UpstreamHashBy + upstreams[name].LoadBalancing = anns.LoadBalancing + if upstreams[name].LoadBalancing == "" { + upstreams[name].LoadBalancing = n.store.GetBackendConfiguration().LoadBalancing } - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcKey := fmt.Sprintf("%v/%v", ing.Namespace, path.Backend.ServiceName) - // Add the service cluster endpoint as the upstream instead of individual endpoints - // if the serviceUpstream annotation is enabled + // add the service ClusterIP as a single Endpoint instead of individual Endpoints if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend) if err != nil { - glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err) + klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[name].Endpoints = []ingress.Endpoint{endpoint} } } + // configure traffic shaping for canary + if anns.Canary.Enabled { + upstreams[name].NoServer = true + upstreams[name].TrafficShapingPolicy = ingress.TrafficShapingPolicy{ + Weight: anns.Canary.Weight, + Header: anns.Canary.Header, + HeaderValue: anns.Canary.HeaderValue, + Cookie: anns.Canary.Cookie, + } + } + if len(upstreams[name].Endpoints) == 0 { - endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), &anns.HealthCheck) + endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String()) if err != nil { - glog.Warningf("error obtaining service endpoints: %v", err) + klog.Warningf("Error obtaining Endpoints for Service %q: %v", 
svcKey, err) continue } upstreams[name].Endpoints = endp } - s, err := n.listers.Service.GetByName(svcKey) + s, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("error obtaining service: %v", err) + klog.Warningf("Error obtaining Service %q: %v", svcKey, err) continue } @@ -753,22 +805,22 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres return upstreams } -func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) { - svcObj, svcExists, err := n.listers.Service.GetByKey(svcKey) - - if !svcExists { - return endpoint, fmt.Errorf("service %v does not exist", svcKey) +// getServiceClusterEndpoint returns an Endpoint corresponding to the ClusterIP +// field of a Service. +func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *networking.IngressBackend) (endpoint ingress.Endpoint, err error) { + svc, err := n.store.GetService(svcKey) + if err != nil { + return endpoint, fmt.Errorf("service %q does not exist", svcKey) } - svc := svcObj.(*apiv1.Service) if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { - return endpoint, fmt.Errorf("No ClusterIP found for service %s", svcKey) + return endpoint, fmt.Errorf("no ClusterIP found for Service %q", svcKey) } endpoint.Address = svc.Spec.ClusterIP - // If the service port in the ingress uses a name, lookup - // the actual port in the service spec + // if the Service port is referenced by name in the Ingress, lookup the + // actual port in the service spec if backend.ServicePort.Type == intstr.String { var port int32 = -1 for _, svcPort := range svc.Spec.Ports { @@ -778,7 +830,7 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte } } if port == -1 { - return endpoint, fmt.Errorf("no port mapped for service %s and port name %s", svc.Name, backend.ServicePort.String()) + return endpoint, fmt.Errorf("service %q does not have a port named %q", svc.Name, backend.ServicePort) } endpoint.Port = fmt.Sprintf("%d", port) } else { @@ -788,50 +840,37 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte return endpoint, err } -// serviceEndpoints returns the upstream servers (endpoints) associated -// to a service. -func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, - hz *healthcheck.Config) ([]ingress.Endpoint, error) { - svc, err := n.listers.Service.GetByName(svcKey) +// serviceEndpoints returns the upstream servers (Endpoints) associated with a Service. 
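+// The backendPort argument may reference the Service port by number, by
+// target port, or by name; the first declared port that matches is used. As
+// an illustration with a hypothetical Service port declared as
+// {name: "http", port: 80, targetPort: 8080}, the values "80", "8080" and
+// "http" would all resolve to the same set of Endpoints.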
+func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingress.Endpoint, error) { + svc, err := n.store.GetService(svcKey) var upstreams []ingress.Endpoint if err != nil { - return upstreams, fmt.Errorf("error getting service %v from the cache: %v", svcKey, err) + return upstreams, err } - glog.V(3).Infof("obtaining port information for service %v", svcKey) + klog.V(3).Infof("Obtaining ports information for Service %q", svcKey) for _, servicePort := range svc.Spec.Ports { - // targetPort could be a string, use the name or the port (int) + // targetPort could be a string, use either the port name or number (int) if strconv.Itoa(int(servicePort.Port)) == backendPort || servicePort.TargetPort.String() == backendPort || servicePort.Name == backendPort { - endps := n.getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz) + endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + klog.Warningf("Service %q does not have any active Endpoint.", svcKey) } - if n.cfg.SortBackends { - sort.SliceStable(endps, func(i, j int) bool { - iName := endps[i].Address - jName := endps[j].Address - if iName != jName { - return iName < jName - } - - return endps[i].Port < endps[j].Port - }) - } upstreams = append(upstreams, endps...) break } } - // Ingress with an ExternalName service and no port defined in the service. + // Ingress with an ExternalName Service and no port defined for that Service if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName { externalPort, err := strconv.Atoi(backendPort) if err != nil { - glog.Warningf("only numeric ports are allowed in ExternalName services: %v is not valid as a TCP/UDP port", backendPort) + klog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort) return upstreams, nil } @@ -840,9 +879,9 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, Port: int32(externalPort), TargetPort: intstr.FromString(backendPort), } - endps := n.getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz) + endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + klog.Warningf("Service %q does not have any active Endpoint.", svcKey) return upstreams, nil } @@ -850,61 +889,71 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, return upstreams, nil } - if !n.cfg.SortBackends { - rand.Seed(time.Now().UnixNano()) - for i := range upstreams { - j := rand.Intn(i + 1) - upstreams[i], upstreams[j] = upstreams[j], upstreams[i] - } - } - return upstreams, nil } -// createServers initializes a map that contains information about the list of -// FDQN referenced by ingress rules and the common name field in the referenced -// SSL certificates. Each server is configured with location / using a default -// backend specified by the user or the one inside the ingress spec. 
-func (n *NGINXController) createServers(data []*extensions.Ingress,
+// overridePemFileNameAndPemSHA should only be called when DynamicCertificatesEnabled is set.
+// Ideally this function should not exist; the only reason it does is that we
+// rely on PemFileName in nginx.tmpl to configure SSL directives
+// and on PemSHA to force a reload.
+func (n *NGINXController) overridePemFileNameAndPemSHA(cert *ingress.SSLCert) {
+	// TODO(elvinefendi): It is not great but we currently use PemFileName to decide whether SSL needs to be configured
+	// in nginx configuration or not. The whole thing needs to be refactored, we should rely on a proper
+	// signal to configure SSL, not PemFileName.
+	cert.PemFileName = n.cfg.FakeCertificate.PemFileName
+
+	// TODO(elvinefendi): This is another hacky way of avoiding an NGINX reload when the certificate
+	// changes in dynamic SSL mode, since FakeCertificate never changes.
+	cert.PemSHA = n.cfg.FakeCertificate.PemSHA
+}
+
+// createServers builds a map of host name to Server structs from a map of
+// already computed Upstream structs. Each Server is configured with at least
+// one root location, which uses a default backend if left unspecified.
+func (n *NGINXController) createServers(data []*ingress.Ingress,
 	upstreams map[string]*ingress.Backend,
 	du *ingress.Backend) map[string]*ingress.Server {

 	servers := make(map[string]*ingress.Server, len(data))
-	// If a server has a hostname equivalent to a pre-existing alias, then we
-	// remove the alias to avoid conflicts.
 	aliases := make(map[string]string, len(data))

-	bdef := n.GetDefaultBackend()
+	bdef := n.store.GetDefaultBackend()
 	ngxProxy := proxy.Config{
-		BodySize:          bdef.ProxyBodySize,
-		ConnectTimeout:    bdef.ProxyConnectTimeout,
-		SendTimeout:       bdef.ProxySendTimeout,
-		ReadTimeout:       bdef.ProxyReadTimeout,
-		BufferSize:        bdef.ProxyBufferSize,
-		CookieDomain:      bdef.ProxyCookieDomain,
-		CookiePath:        bdef.ProxyCookiePath,
-		NextUpstream:      bdef.ProxyNextUpstream,
-		RequestBuffering:  bdef.ProxyRequestBuffering,
-		ProxyRedirectFrom: bdef.ProxyRedirectFrom,
+		BodySize:            bdef.ProxyBodySize,
+		ConnectTimeout:      bdef.ProxyConnectTimeout,
+		SendTimeout:         bdef.ProxySendTimeout,
+		ReadTimeout:         bdef.ProxyReadTimeout,
+		BuffersNumber:       bdef.ProxyBuffersNumber,
+		BufferSize:          bdef.ProxyBufferSize,
+		CookieDomain:        bdef.ProxyCookieDomain,
+		CookiePath:          bdef.ProxyCookiePath,
+		NextUpstream:        bdef.ProxyNextUpstream,
+		NextUpstreamTimeout: bdef.ProxyNextUpstreamTimeout,
+		NextUpstreamTries:   bdef.ProxyNextUpstreamTries,
+		RequestBuffering:    bdef.ProxyRequestBuffering,
+		ProxyRedirectFrom:   bdef.ProxyRedirectFrom,
+		ProxyBuffering:      bdef.ProxyBuffering,
 	}

-	// generated on Start() with createDefaultSSLCertificate()
-	defaultPemFileName := n.cfg.FakeCertificatePath
-	defaultPemSHA := n.cfg.FakeCertificateSHA
+	defaultCertificate := n.cfg.FakeCertificate

-	// Tries to fetch the default Certificate from nginx configuration.
- // If it does not exists, use the ones generated on Start() - defaultCertificate, err := n.getPemCertificate(n.cfg.DefaultSSLCertificate) - if err == nil { - defaultPemFileName = defaultCertificate.PemFileName - defaultPemSHA = defaultCertificate.PemSHA + // read custom default SSL certificate, fall back to generated default certificate + if n.cfg.DefaultSSLCertificate != "" { + certificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate) + if err == nil { + defaultCertificate = certificate + if n.cfg.DynamicCertificatesEnabled { + n.overridePemFileNameAndPemSHA(defaultCertificate) + } + } else { + klog.Warningf("Error loading custom default certificate, falling back to generated default:\n%v", err) + } } - // initialize the default server + // initialize default server and root location servers[defServerName] = &ingress.Server{ - Hostname: defServerName, - SSLCertificate: defaultPemFileName, - SSLPemChecksum: defaultPemSHA, + Hostname: defServerName, + SSLCert: *defaultCertificate, Locations: []*ingress.Location{ { Path: rootLocation, @@ -912,47 +961,53 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, Backend: du.Name, Proxy: ngxProxy, Service: du.Service, + Logs: log.Config{ + Access: n.store.GetBackendConfiguration().EnableAccessLogForDefaultBackend, + Rewrite: false, + }, }, }} - // initialize all the servers + // initialize all other servers for _, ing := range data { - anns := n.getIngressAnnotations(ing) + ingKey := k8s.MetaNamespaceKey(ing) + anns := ing.ParsedAnnotations - // default upstream server + // default upstream name un := du.Name + if anns.Canary.Enabled { + klog.V(2).Infof("Ingress %v is marked as Canary, ignoring", ingKey) + continue + } + if ing.Spec.Backend != nil { - // replace default backend - defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) + defUpstream := upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort) + if backendUpstream, ok := upstreams[defUpstream]; ok { + // use backend specified in Ingress as the default backend for all its rules un = backendUpstream.Name - // Special case: - // ingress only with a backend and no rules - // this case defines a "catch all" server + // special "catch all" case, Ingress with a backend but no rule defLoc := servers[defServerName].Locations[0] if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { + klog.V(2).Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", + ingKey, defServerName) + defLoc.IsDefBackend = false defLoc.Backend = backendUpstream.Name defLoc.Service = backendUpstream.Service defLoc.Ingress = ing - // we need to use the ingress annotations - defLoc.BasicDigestAuth = anns.BasicDigestAuth - defLoc.ClientBodyBufferSize = anns.ClientBodyBufferSize - defLoc.ConfigurationSnippet = anns.ConfigurationSnippet - defLoc.CorsConfig = anns.CorsConfig - defLoc.ExternalAuth = anns.ExternalAuth - defLoc.Proxy = anns.Proxy - defLoc.RateLimit = anns.RateLimit - // TODO: Redirect and rewrite can affect the catch all behavior. 
Don't use this annotations for now - // defLoc.Redirect = anns.Redirect - // defLoc.Rewrite = anns.Rewrite - defLoc.UpstreamVhost = anns.UpstreamVhost - defLoc.VtsFilterKey = anns.VtsFilterKey - defLoc.Whitelist = anns.Whitelist - defLoc.Denied = anns.Denied + // TODO: Redirect and rewrite can affect the catch all behavior, skip for now + originalRedirect := defLoc.Redirect + originalRewrite := defLoc.Rewrite + locationApplyAnnotations(defLoc, anns) + defLoc.Redirect = originalRedirect + defLoc.Rewrite = originalRewrite + } else { + klog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.", + ingKey) } } } @@ -967,25 +1022,34 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, continue } + loc := &ingress.Location{ + Path: rootLocation, + IsDefBackend: true, + Backend: un, + Service: &apiv1.Service{}, + } + locationApplyAnnotations(loc, anns) + servers[host] = &ingress.Server{ Hostname: host, Locations: []*ingress.Location{ - { - Path: rootLocation, - IsDefBackend: true, - Backend: un, - Proxy: ngxProxy, - Service: &apiv1.Service{}, - }, + loc, }, SSLPassthrough: anns.SSLPassthrough, + SSLCiphers: anns.SSLCiphers, } } } // configure default location, alias, and SSL for _, ing := range data { - anns := n.getIngressAnnotations(ing) + ingKey := k8s.MetaNamespaceKey(ing) + anns := ing.ParsedAnnotations + + if anns.Canary.Enabled { + klog.V(2).Infof("Ingress %v is marked as Canary, ignoring", ingKey) + continue + } for _, rule := range ing.Spec.Rules { host := rule.Host @@ -993,7 +1057,6 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, host = defServerName } - // setup server aliases if anns.Alias != "" { if servers[host].Alias == "" { servers[host].Alias = anns.Alias @@ -1001,89 +1064,82 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, aliases["Alias"] = host } } else { - glog.Warningf("ingress %v/%v for host %v contains an Alias but one has already been configured.", - ing.Namespace, ing.Name, host) + klog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)", + host, ingKey) } } - //notifying the user that it has already been configured. 
-			if servers[host].ServerSnippet != "" && anns.ServerSnippet != "" {
-				glog.Warningf("ingress %v/%v for host %v contains a Server Snippet section that it has already been configured.",
-					ing.Namespace, ing.Name, host)
+			if anns.ServerSnippet != "" {
+				if servers[host].ServerSnippet == "" {
+					servers[host].ServerSnippet = anns.ServerSnippet
+				} else {
+					klog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)",
+						host, ingKey)
+				}
 			}

-			// only add a server snippet if the server does not have one previously configured
-			if servers[host].ServerSnippet == "" && anns.ServerSnippet != "" {
-				servers[host].ServerSnippet = anns.ServerSnippet
+			// only add SSL ciphers if the server does not have them previously configured
+			if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" {
+				servers[host].SSLCiphers = anns.SSLCiphers
 			}

 			// only add a certificate if the server does not have one previously configured
-			if servers[host].SSLCertificate != "" {
+			if servers[host].SSLCert.PemFileName != "" {
 				continue
 			}

 			if len(ing.Spec.TLS) == 0 {
-				glog.V(3).Infof("ingress %v/%v for host %v does not contains a TLS section", ing.Namespace, ing.Name, host)
+				klog.V(3).Infof("Ingress %q does not contain a TLS section.", ingKey)
 				continue
 			}

-			tlsSecretName := ""
-			found := false
-			for _, tls := range ing.Spec.TLS {
-				if sets.NewString(tls.Hosts...).Has(host) {
-					tlsSecretName = tls.SecretName
-					found = true
-					break
-				}
-			}
-
-			if !found {
-				// does not contains a TLS section but none of the host match
-				continue
-			}
+			tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert)

 			if tlsSecretName == "" {
-				glog.V(3).Infof("host %v is listed on tls section but secretName is empty. Using default cert", host)
-				servers[host].SSLCertificate = defaultPemFileName
-				servers[host].SSLPemChecksum = defaultPemSHA
+				klog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host)
+				servers[host].SSLCert = *defaultCertificate
 				continue
 			}

-			key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
-			bc, exists := n.sslCertTracker.Get(key)
-			if !exists {
-				glog.Warningf("ssl certificate \"%v\" does not exist in local store", key)
+			secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
+			cert, err := n.store.GetLocalSSLCert(secrKey)
+			if err != nil {
+				klog.Warningf("Error getting SSL certificate %q: %v. Using default certificate.", secrKey, err)
+				servers[host].SSLCert = *defaultCertificate
 				continue
 			}

-			cert := bc.(*ingress.SSLCert)
 			err = cert.Certificate.VerifyHostname(host)
 			if err != nil {
-				glog.Warningf("unexpected error validating SSL certificate %v for host %v. Reason: %v", key, host, err)
-				glog.Warningf("Validating certificate against DNS names. This will be deprecated in a future version.")
-				// check the common name field
+				klog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err)
+				klog.Warning("Validating certificate against DNS names. This will be deprecated in a future version.")
+				// check the Common Name field
 				// https://github.com/golang/go/issues/22922
 				err := verifyHostname(host, cert.Certificate)
 				if err != nil {
-					glog.Warningf("ssl certificate %v does not contain a Common Name or Subject Alternative Name for host %v.
Reason: %v", key, host, err) + klog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", + secrKey, host, err) + klog.Warningf("Using default certificate") + servers[host].SSLCert = *defaultCertificate continue } } - servers[host].SSLCertificate = cert.PemFileName - servers[host].SSLFullChainCertificate = cert.FullChainPemFileName - servers[host].SSLPemChecksum = cert.PemSHA - servers[host].SSLExpireTime = cert.ExpireTime + if n.cfg.DynamicCertificatesEnabled { + n.overridePemFileNameAndPemSHA(cert) + } + + servers[host].SSLCert = *cert if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) { - glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host) + klog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime) } } } for alias, host := range aliases { if _, ok := servers[alias]; ok { - glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.", alias, host) + klog.Warningf("Conflicting hostname (%v) and alias (%v). Removing alias to avoid conflicts.", host, alias) servers[host].Alias = "" } } @@ -1091,146 +1147,251 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, return servers } -// getEndpoints returns a list of : for a given service/target port combination. -func (n *NGINXController) getEndpoints( - s *apiv1.Service, - servicePort *apiv1.ServicePort, - proto apiv1.Protocol, - hz *healthcheck.Config) []ingress.Endpoint { - - upsServers := []ingress.Endpoint{} - - // avoid duplicated upstream servers when the service - // contains multiple port definitions sharing the same - // targetport. - adus := make(map[string]bool) - - // ExternalName services - if s.Spec.Type == apiv1.ServiceTypeExternalName { - glog.V(3).Info("Ingress using a service %v of type=ExternalName : %v", s.Name) - - targetPort := servicePort.TargetPort.IntValue() - // check for invalid port value - if targetPort <= 0 { - glog.Errorf("ExternalName service with an invalid port: %v", targetPort) - return upsServers +func locationApplyAnnotations(loc *ingress.Location, anns *annotations.Ingress) { + loc.BasicDigestAuth = anns.BasicDigestAuth + loc.ClientBodyBufferSize = anns.ClientBodyBufferSize + loc.ConfigurationSnippet = anns.ConfigurationSnippet + loc.CorsConfig = anns.CorsConfig + loc.ExternalAuth = anns.ExternalAuth + loc.EnableGlobalAuth = anns.EnableGlobalAuth + loc.HTTP2PushPreload = anns.HTTP2PushPreload + loc.Proxy = anns.Proxy + loc.RateLimit = anns.RateLimit + loc.Redirect = anns.Redirect + loc.Rewrite = anns.Rewrite + loc.UpstreamVhost = anns.UpstreamVhost + loc.Whitelist = anns.Whitelist + loc.Denied = anns.Denied + loc.XForwardedPrefix = anns.XForwardedPrefix + loc.UsePortInRedirects = anns.UsePortInRedirects + loc.Connection = anns.Connection + loc.Logs = anns.Logs + loc.LuaRestyWAF = anns.LuaRestyWAF + loc.InfluxDB = anns.InfluxDB + loc.DefaultBackend = anns.DefaultBackend + loc.BackendProtocol = anns.BackendProtocol + loc.CustomHTTPErrors = anns.CustomHTTPErrors + loc.ModSecurity = anns.ModSecurity + loc.Satisfy = anns.Satisfy +} + +// OK to merge canary ingresses iff there exists one or more ingresses to potentially merge into +func nonCanaryIngressExists(ingresses []*ingress.Ingress, canaryIngresses []*ingress.Ingress) bool { + return len(ingresses)-len(canaryIngresses) > 0 +} + +// ensure that the following conditions are met +// 1) names of backends do not match and canary doesn't merge into itself 
+// 2) primary name is not the default upstream
+// 3) the primary has a server
+func canMergeBackend(primary *ingress.Backend, alternative *ingress.Backend) bool {
+	return alternative != nil && primary.Name != alternative.Name && primary.Name != defUpstreamName && !primary.NoServer
+}
+
+// Performs the merge action and checks to ensure that two alternative backends do not merge into each other
+func mergeAlternativeBackend(priUps *ingress.Backend, altUps *ingress.Backend) bool {
+	if priUps.NoServer {
+		klog.Warningf("unable to merge alternative backend %v into primary backend %v because %v is itself an alternative backend",
+			altUps.Name, priUps.Name, priUps.Name)
+		return false
+	}
+
+	for _, ab := range priUps.AlternativeBackends {
+		if ab == altUps.Name {
+			klog.V(2).Infof("skip merge alternative backend %v into %v, it's already present", altUps.Name, priUps.Name)
+			return true
 		}
 	}
+	priUps.AlternativeBackends =
+		append(priUps.AlternativeBackends, altUps.Name)
+
+	return true
+}
+
+// mergeAlternativeBackends compares the rules of an Ingress that defines a
+// potential alternative backend with each existing server, looking for
+// matching host + path pairs.
+// If a match is found, that server should back the alternative backend, so the
+// alternative backend is added to the matching backend's alternatives list.
+// If no match is found, then the serverless backend is deleted.
+func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingress.Backend,
+	servers map[string]*ingress.Server) {
+
+	// merge catch-all alternative backends
+	if ing.Spec.Backend != nil {
+		upsName := upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort)
+
+		altUps := upstreams[upsName]
+
+		if altUps == nil {
+			klog.Warningf("alternative backend %s has already been removed", upsName)
+		} else {
+
+			merged := false
+
+			for _, loc := range servers[defServerName].Locations {
+				priUps := upstreams[loc.Backend]
+
+				if canMergeBackend(priUps, altUps) {
+					klog.V(2).Infof("matching backend %v found for alternative backend %v",
+						priUps.Name, altUps.Name)
+
+					merged = mergeAlternativeBackend(priUps, altUps)
+				}
 			}
+			if !merged {
+				klog.Warningf("unable to find real backend for alternative backend %v.
Deleting.", altUps.Name) + delete(upstreams, altUps.Name) + } + } } - glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String()) - ep, err := n.listers.Endpoint.GetServiceEndpoints(s) - if err != nil { - glog.Warningf("unexpected error obtaining service endpoints: %v", err) - return upsServers - } + for _, rule := range ing.Spec.Rules { + for _, path := range rule.HTTP.Paths { + upsName := upstreamName(ing.Namespace, path.Backend.ServiceName, path.Backend.ServicePort) - for _, ss := range ep.Subsets { - for _, epPort := range ss.Ports { + altUps := upstreams[upsName] - if !reflect.DeepEqual(epPort.Protocol, proto) { + if altUps == nil { + klog.Warningf("alternative backend %s has already been removed", upsName) continue } - var targetPort int32 + merged := false - if servicePort.Name == "" { - // ServicePort.Name is optional if there is only one port - targetPort = epPort.Port - } else if servicePort.Name == epPort.Name { - targetPort = epPort.Port - } + server, ok := servers[rule.Host] + if !ok { + klog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist", + altUps.Name, + rule.Host) - // check for invalid port value - if targetPort <= 0 { continue } - for _, epAddress := range ss.Addresses { - ep := fmt.Sprintf("%v:%v", epAddress.IP, targetPort) - if _, exists := adus[ep]; exists { - continue - } - ups := ingress.Endpoint{ - Address: epAddress.IP, - Port: fmt.Sprintf("%v", targetPort), - MaxFails: hz.MaxFails, - FailTimeout: hz.FailTimeout, - Target: epAddress.TargetRef, + // find matching paths + for _, loc := range server.Locations { + priUps := upstreams[loc.Backend] + + if canMergeBackend(priUps, altUps) && loc.Path == path.Path { + klog.V(2).Infof("matching backend %v found for alternative backend %v", + priUps.Name, altUps.Name) + + merged = mergeAlternativeBackend(priUps, altUps) } - upsServers = append(upsServers, ups) - adus[ep] = true + } + + if !merged { + klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) + delete(upstreams, altUps.Name) } } } - - glog.V(3).Infof("endpoints found: %v", upsServers) - return upsServers } -// readSecrets extracts information about secrets from an Ingress rule -func (n *NGINXController) readSecrets(ing *extensions.Ingress) { +// extractTLSSecretName returns the name of the Secret containing a SSL +// certificate for the given host name, or an empty string. 
+func extractTLSSecretName(host string, ing *ingress.Ingress,
+	getLocalSSLCert func(string) (*ingress.SSLCert, error)) string {
+
+	if ing == nil {
+		return ""
+	}
+
+	// naively return Secret name from TLS spec if host name matches
+	for _, tls := range ing.Spec.TLS {
+		if sets.NewString(tls.Hosts...).Has(host) {
+			return tls.SecretName
+		}
+	}
+
+	// no TLS entry explicitly lists the host name, try each TLS certificate for a matching SAN or CN
 	for _, tls := range ing.Spec.TLS {
+		if tls.SecretName == "" {
+			// there is no secretName specified, so the certificate will never be available
 			continue
 		}
-		key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName)
-		n.syncSecret(key)
-	}
+		secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName)
+
+		cert, err := getLocalSSLCert(secrKey)
+		if err != nil {
+			klog.Warningf("Error getting SSL certificate %q: %v", secrKey, err)
+			continue
+		}
+
+		if cert == nil { // for tests
+			continue
+		}

-	key, _ := parser.GetStringAnnotation("auth-tls-secret", ing)
-	if key == "" {
-		return
+		err = cert.Certificate.VerifyHostname(host)
+		if err != nil {
+			continue
+		}
+		klog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey)
+		return tls.SecretName
 	}
-	n.syncSecret(key)
-}

-func (n *NGINXController) isForceReload() bool {
-	return atomic.LoadInt32(&n.forceReload) != 0
+	return ""
 }

-// SetForceReload sets if the ingress controller should be reloaded or not
-func (n *NGINXController) SetForceReload(shouldReload bool) {
-	if shouldReload {
-		atomic.StoreInt32(&n.forceReload, 1)
-		n.syncQueue.Enqueue(&extensions.Ingress{})
-	} else {
-		atomic.StoreInt32(&n.forceReload, 0)
+// getRemovedHosts returns a list of the hostnames that are no longer
+// associated with the NGINX configuration.
+func getRemovedHosts(rucfg, newcfg *ingress.Configuration) []string {
+	old := sets.NewString()
+	new := sets.NewString()
+
+	for _, s := range rucfg.Servers {
+		if !old.Has(s.Hostname) {
+			old.Insert(s.Hostname)
+		}
 	}
-}

-func (n *NGINXController) extractAnnotations(ing *extensions.Ingress) {
-	anns := n.annotations.Extract(ing)
-	glog.V(3).Infof("updating annotations information for ingress %v/%v", anns.Namespace, anns.Name)
-	n.listers.IngressAnnotation.Update(anns)
+	for _, s := range newcfg.Servers {
+		if !new.Has(s.Hostname) {
+			new.Insert(s.Hostname)
+		}
+	}
+
+	return old.Difference(new).List()
 }

-// getByIngress returns the parsed annotations from an Ingress
-func (n *NGINXController) getIngressAnnotations(ing *extensions.Ingress) *annotations.Ingress {
-	key := fmt.Sprintf("%v/%v", ing.Namespace, ing.Name)
-	item, exists, err := n.listers.IngressAnnotation.GetByKey(key)
-	if err != nil {
-		glog.Errorf("unexpected error getting ingress annotation %v: %v", key, err)
-		return &annotations.Ingress{}
+// getRemovedIngresses returns a list of the Ingress keys that are no longer
+// present in the new configuration.
+func getRemovedIngresses(rucfg, newcfg *ingress.Configuration) []string {
+	oldIngresses := sets.NewString()
+	newIngresses := sets.NewString()
+
+	for _, server := range rucfg.Servers {
+		for _, location := range server.Locations {
+			if location.Ingress == nil {
+				continue
+			}
+
+			ingKey := k8s.MetaNamespaceKey(location.Ingress)
+			if !oldIngresses.Has(ingKey) {
+				oldIngresses.Insert(ingKey)
+			}
+		}
 	}
-	if !exists {
-		glog.Errorf("ingress annotation %v was not found", key)
-		return &annotations.Ingress{}
+
+	for _, server := range newcfg.Servers {
+		for _, location := range server.Locations {
+			if location.Ingress == nil {
+				continue
+			}
+
+			ingKey := k8s.MetaNamespaceKey(location.Ingress)
+			if !newIngresses.Has(ingKey) {
+				newIngresses.Insert(ingKey)
+			}
+		}
 	}
-	return
item.(*annotations.Ingress) + + return oldIngresses.Difference(newIngresses).List() +} + +// checks conditions for whether or not an upstream should be created for a custom default backend +func shouldCreateUpstreamForLocationDefaultBackend(upstream *ingress.Backend, location *ingress.Location) bool { + return (upstream.Name == location.Backend) && + (len(upstream.Endpoints) == 0 || len(location.CustomHTTPErrors) != 0) && + location.DefaultBackend != nil } diff --git a/internal/ingress/controller/controller_test.go b/internal/ingress/controller/controller_test.go new file mode 100644 index 0000000000..232b1f1e5b --- /dev/null +++ b/internal/ingress/controller/controller_test.go @@ -0,0 +1,1158 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/eapache/channels" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/fake" + + "k8s.io/ingress-nginx/internal/file" + "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/ingress/annotations" + "k8s.io/ingress-nginx/internal/ingress/annotations/canary" + "k8s.io/ingress-nginx/internal/ingress/controller/config" + ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" + "k8s.io/ingress-nginx/internal/ingress/controller/store" + "k8s.io/ingress-nginx/internal/ingress/defaults" + "k8s.io/ingress-nginx/internal/ingress/metric" + "k8s.io/ingress-nginx/internal/ingress/resolver" + "k8s.io/ingress-nginx/internal/k8s" + "k8s.io/ingress-nginx/internal/net/ssl" +) + +const fakeCertificateName = "default-fake-certificate" + +type fakeIngressStore struct { + ingresses []*ingress.Ingress +} + +func (fakeIngressStore) GetBackendConfiguration() ngx_config.Configuration { + return ngx_config.Configuration{} +} + +func (fakeIngressStore) GetConfigMap(key string) (*corev1.ConfigMap, error) { + return nil, fmt.Errorf("test error") +} + +func (fakeIngressStore) GetSecret(key string) (*corev1.Secret, error) { + return nil, fmt.Errorf("test error") +} + +func (fakeIngressStore) GetService(key string) (*corev1.Service, error) { + return nil, fmt.Errorf("test error") +} + +func (fakeIngressStore) GetServiceEndpoints(key string) (*corev1.Endpoints, error) { + return nil, fmt.Errorf("test error") +} + +func (fis fakeIngressStore) ListIngresses(store.IngressFilterFunc) []*ingress.Ingress { + return fis.ingresses +} + +func (fakeIngressStore) GetRunningControllerPodsCount() int { + return 0 +} + +func (fakeIngressStore) GetLocalSSLCert(name string) (*ingress.SSLCert, error) { + return nil, fmt.Errorf("test error") +} + +func (fakeIngressStore) ListLocalSSLCerts() []*ingress.SSLCert { + return nil +} + +func (fakeIngressStore) GetAuthCertificate(string) 
(*resolver.AuthSSLCert, error) { + return nil, fmt.Errorf("test error") +} + +func (fakeIngressStore) GetDefaultBackend() defaults.Backend { + return defaults.Backend{} +} + +func (fakeIngressStore) Run(stopCh chan struct{}) {} + +type testNginxTestCommand struct { + t *testing.T + expected string + out []byte + err error +} + +func (ntc testNginxTestCommand) ExecCommand(args ...string) *exec.Cmd { + return nil +} + +func (ntc testNginxTestCommand) Test(cfg string) ([]byte, error) { + fd, err := os.Open(cfg) + if err != nil { + ntc.t.Errorf("could not read generated nginx configuration: %v", err.Error()) + return nil, err + } + defer fd.Close() + bytes, err := ioutil.ReadAll(fd) + if err != nil { + ntc.t.Errorf("could not read generated nginx configuration: %v", err.Error()) + } + if string(bytes) != ntc.expected { + ntc.t.Errorf("unexpected generated configuration %v. Expecting %v", string(bytes), ntc.expected) + } + return ntc.out, ntc.err +} + +type fakeTemplate struct{} + +func (fakeTemplate) Write(conf config.TemplateConfig) ([]byte, error) { + r := []byte{} + for _, s := range conf.Servers { + if len(r) > 0 { + r = append(r, ',') + } + r = append(r, []byte(s.Hostname)...) + } + return r, nil +} + +func TestCheckIngress(t *testing.T) { + defer func() { + filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error { + if info.IsDir() && os.TempDir() != path { + return filepath.SkipDir + } + if strings.HasPrefix(info.Name(), tempNginxPattern) { + os.Remove(path) + } + return nil + }) + }() + + // Ensure no panic with wrong arguments + var nginx *NGINXController + nginx.CheckIngress(nil) + nginx = newNGINXController(t) + nginx.CheckIngress(nil) + nginx.metricCollector = metric.DummyCollector{} + + nginx.t = fakeTemplate{} + nginx.store = fakeIngressStore{ + ingresses: []*ingress.Ingress{}, + } + + ing := &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "user-namespace", + Annotations: map[string]string{}, + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + }, + }, + }, + } + + t.Run("When the ingress class differs from nginx", func(t *testing.T) { + ing.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = "different" + nginx.command = testNginxTestCommand{ + t: t, + err: fmt.Errorf("test error"), + } + if nginx.CheckIngress(ing) != nil { + t.Errorf("with a different ingress class, no error should be returned") + } + }) + + t.Run("when the class is the nginx one", func(t *testing.T) { + ing.ObjectMeta.Annotations["kubernetes.io/ingress.class"] = "nginx" + nginx.command = testNginxTestCommand{ + t: t, + err: nil, + expected: "_,example.com", + } + if nginx.CheckIngress(ing) != nil { + t.Errorf("with a new ingress without error, no error should be returned") + } + + t.Run("When the hostname is updated", func(t *testing.T) { + nginx.store = fakeIngressStore{ + ingresses: []*ingress.Ingress{ + { + Ingress: *ing, + ParsedAnnotations: &annotations.Ingress{}, + }, + }, + } + ing.Spec.Rules[0].Host = "test.example.com" + nginx.command = testNginxTestCommand{ + t: t, + err: nil, + expected: "_,test.example.com", + } + if nginx.CheckIngress(ing) != nil { + t.Errorf("with a new ingress without error, no error should be returned") + } + }) + + t.Run("When nginx test returns an error", func(t *testing.T) { + nginx.command = testNginxTestCommand{ + t: t, + err: fmt.Errorf("test error"), + out: []byte("this is the test command output"), + expected: "_,test.example.com", + } + if 
nginx.CheckIngress(ing) == nil { + t.Errorf("with a new ingress with an error, an error should be returned") + } + }) + + t.Run("When the ingress is in a different namespace than the watched one", func(t *testing.T) { + nginx.command = testNginxTestCommand{ + t: t, + err: fmt.Errorf("test error"), + } + nginx.cfg.Namespace = "other-namespace" + ing.ObjectMeta.Namespace = "test-namespace" + if nginx.CheckIngress(ing) != nil { + t.Errorf("with a new ingress without error, no error should be returned") + } + }) + }) +} + +func TestMergeAlternativeBackends(t *testing.T) { + testCases := map[string]struct { + ingress *ingress.Ingress + upstreams map[string]*ingress.Backend + servers map[string]*ingress.Server + expUpstreams map[string]*ingress.Backend + expServers map[string]*ingress.Server + }{ + "alternative backend has no server and embeds into matching real backend": { + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "example.com": { + Hostname: "example.com", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + AlternativeBackends: []string{"example-http-svc-canary-80"}, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "example.com": { + Hostname: "example.com", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + }, + "alternative backend merges with the correct real backend when multiple are present": { + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "foo-http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-foo-http-svc-80": { + Name: "example-foo-http-svc-80", + NoServer: false, + }, + 
"example-foo-http-svc-canary-80": { + Name: "example-foo-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + AlternativeBackends: []string{"example-http-svc-canary-80"}, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "foo.bar": { + Hostname: "foo.bar", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-foo-http-svc-80", + }, + }, + }, + "example.com": { + Hostname: "example.com", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-foo-http-svc-80": { + Name: "example-foo-http-svc-80", + NoServer: false, + AlternativeBackends: []string{"example-foo-http-svc-canary-80"}, + }, + "example-foo-http-svc-canary-80": { + Name: "example-foo-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + AlternativeBackends: []string{"example-http-svc-canary-80"}, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "example.com": { + Hostname: "example.com", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + }, + "alternative backend does not merge into itself": { + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{}, + map[string]*ingress.Backend{}, + map[string]*ingress.Server{}, + }, + "catch-all alternative backend has no server and embeds into matching real backend": { + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + IntVal: 80, + }, + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "_": { + Hostname: "_", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + map[string]*ingress.Backend{ + "example-http-svc-80": { + Name: "example-http-svc-80", + NoServer: false, + AlternativeBackends: 
[]string{"example-http-svc-canary-80"}, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "_": { + Hostname: "_", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "example-http-svc-80", + }, + }, + }, + }, + }, + "catch-all alternative backend does not merge into itself": { + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + IntVal: 80, + }, + }, + }, + }, + }, + map[string]*ingress.Backend{ + "upstream-default-backend": { + Name: "upstream-default-backend", + NoServer: false, + }, + "example-http-svc-canary-80": { + Name: "example-http-svc-canary-80", + NoServer: true, + TrafficShapingPolicy: ingress.TrafficShapingPolicy{ + Weight: 20, + }, + }, + }, + map[string]*ingress.Server{ + "_": { + Hostname: "_", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "upstream-default-backend", + }, + }, + }, + }, + map[string]*ingress.Backend{}, + map[string]*ingress.Server{ + "_": { + Hostname: "_", + Locations: []*ingress.Location{ + { + Path: "/", + Backend: "upstream-default-backend", + }, + }, + }, + }, + }, + } + + for title, tc := range testCases { + t.Run(title, func(t *testing.T) { + mergeAlternativeBackends(tc.ingress, tc.upstreams, tc.servers) + + for upsName, expUpstream := range tc.expUpstreams { + actualUpstream, ok := tc.upstreams[upsName] + if !ok { + t.Errorf("expected upstream %s to exist but it did not", upsName) + } + + if !actualUpstream.Equal(expUpstream) { + t.Logf("actual upstream %s alternative backends: %s", actualUpstream.Name, actualUpstream.AlternativeBackends) + t.Logf("expected upstream %s alternative backends: %s", expUpstream.Name, expUpstream.AlternativeBackends) + t.Errorf("upstream %s was not equal to what was expected: ", upsName) + } + } + + for serverName, expServer := range tc.expServers { + actualServer, ok := tc.servers[serverName] + if !ok { + t.Errorf("expected server %s to exist but it did not", serverName) + } + + if !actualServer.Equal(expServer) { + t.Errorf("server %s was not equal to what was expected", serverName) + } + } + }) + } +} + +func TestExtractTLSSecretName(t *testing.T) { + testCases := map[string]struct { + host string + ingress *ingress.Ingress + fn func(string) (*ingress.SSLCert, error) + expName string + }{ + "nil ingress": { + "foo.bar", + nil, + func(string) (*ingress.SSLCert, error) { + return nil, nil + }, + "", + }, + "empty ingress": { + "foo.bar", + &ingress.Ingress{}, + func(string) (*ingress.SSLCert, error) { + return nil, nil + }, + "", + }, + "ingress tls, nil secret": { + "foo.bar", + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + {SecretName: "demo"}, + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + }, + }, + }, + }, + }, + func(string) (*ingress.SSLCert, error) { + return nil, nil + }, + "", + }, + "ingress tls, no host, matching cert cn": { + "foo.bar", + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + {SecretName: "demo"}, + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + 
}, + }, + }, + }, + }, + func(string) (*ingress.SSLCert, error) { + return &ingress.SSLCert{ + Certificate: fakeX509Cert([]string{"foo.bar", "example.com"}), + }, nil + }, + "demo", + }, + "ingress tls, no host, wildcard cert with matching cn": { + "foo.bar", + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + { + SecretName: "demo", + }, + }, + Rules: []networking.IngressRule{ + { + Host: "test.foo.bar", + }, + }, + }, + }, + }, + func(string) (*ingress.SSLCert, error) { + return &ingress.SSLCert{ + Certificate: fakeX509Cert([]string{"*.foo.bar", "foo.bar"}), + }, nil + }, + "demo", + }, + "ingress tls, hosts, matching cert cn": { + "foo.bar", + &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + { + Hosts: []string{"foo.bar", "example.com"}, + SecretName: "demo", + }, + }, + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + }, + }, + }, + }, + }, + func(string) (*ingress.SSLCert, error) { + return nil, nil + }, + "demo", + }, + } + + for title, tc := range testCases { + t.Run(title, func(t *testing.T) { + name := extractTLSSecretName(tc.host, tc.ingress, tc.fn) + if name != tc.expName { + t.Errorf("Expected Secret name %q (got %q)", tc.expName, name) + } + }) + } +} + +func TestGetBackendServers(t *testing.T) { + ctl := newNGINXController(t) + + testCases := []struct { + Ingresses []*ingress.Ingress + Validate func(servers []*ingress.Server) + }{ + { + Ingresses: []*ingress.Ingress{ + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + IntVal: 80, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: true, + }, + }, + }, + }, + Validate: func(servers []*ingress.Server) { + if len(servers) != 1 { + t.Errorf("servers count should be 1, got %d", len(servers)) + return + } + + s := servers[0] + if s.Hostname != "_" { + t.Errorf("server hostname should be '_', got '%s'", s.Hostname) + } + if !s.Locations[0].IsDefBackend { + t.Errorf("server location 0 should be default backend") + } + + if s.Locations[0].Backend != defUpstreamName { + t.Errorf("location backend should be '%s', got '%s'", defUpstreamName, s.Locations[0].Backend) + } + }, + }, + { + Ingresses: []*ingress.Ingress{ + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + IntVal: 80, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: true, + }, + }, + }, + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.IntOrString{ + IntVal: 80, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: false, + }, + }, + }, + }, + Validate: func(servers []*ingress.Server) { + if len(servers) != 1 { + t.Errorf("servers count should be 1, got %d", len(servers)) + return + } + + s := servers[0] + if s.Hostname != "_" { + t.Errorf("server hostname should 
be '_', got '%s'", s.Hostname) + } + if s.Locations[0].IsDefBackend { + t.Errorf("server location 0 should not be default backend") + } + + if s.Locations[0].Backend != "example-http-svc-80" { + t.Errorf("location backend should be 'example-http-svc-80', got '%s'", s.Locations[0].Backend) + } + }, + }, + { + Ingresses: []*ingress.Ingress{ + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: true, + }, + }, + }, + }, + Validate: func(servers []*ingress.Server) { + if len(servers) != 1 { + t.Errorf("servers count should be 1, got %d", len(servers)) + return + } + + s := servers[0] + if s.Hostname != "_" { + t.Errorf("server hostname should be '_', got '%s'", s.Hostname) + } + if !s.Locations[0].IsDefBackend { + t.Errorf("server location 0 should be default backend") + } + + if s.Locations[0].Backend != defUpstreamName { + t.Errorf("location backend should be '%s', got '%s'", defUpstreamName, s.Locations[0].Backend) + } + }, + }, + { + Ingresses: []*ingress.Ingress{ + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: false, + }, + }, + }, + { + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-canary", + Namespace: "example", + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc-canary", + ServicePort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ParsedAnnotations: &annotations.Ingress{ + Canary: canary.Config{ + Enabled: true, + }, + }, + }, + }, + Validate: func(servers []*ingress.Server) { + if len(servers) != 2 { + t.Errorf("servers count should be 2, got %d", len(servers)) + return + } + + s := servers[0] + if s.Hostname != "_" { + t.Errorf("server hostname should be '_', got '%s'", s.Hostname) + } + if !s.Locations[0].IsDefBackend { + t.Errorf("server location 0 should be default backend") + } + + if s.Locations[0].Backend != defUpstreamName { + t.Errorf("location backend should be '%s', got '%s'", defUpstreamName, s.Locations[0].Backend) + } + + s = servers[1] + if s.Hostname != "example.com" { + t.Errorf("server hostname should be 'example.com', got '%s'", s.Hostname) + } + + if s.Locations[0].Backend != "example-http-svc-80" { + 
t.Errorf("location backend should be 'example-http-svc-80', got '%s'", s.Locations[0].Backend) + } + }, + }, + } + + for _, testCase := range testCases { + _, servers := ctl.getBackendServers(testCase.Ingresses) + testCase.Validate(servers) + } +} + +func newNGINXController(t *testing.T) *NGINXController { + ns := v1.NamespaceDefault + pod := &k8s.PodInfo{ + Name: "testpod", + Namespace: ns, + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + } + + clientSet := fake.NewSimpleClientset() + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), + }, + } + _, err := clientSet.CoreV1().ConfigMaps(ns).Create(configMap) + if err != nil { + t.Fatalf("error creating the configuration map: %v", err) + } + + fs, err := file.NewFakeFS() + if err != nil { + t.Fatalf("error: %v", err) + } + + storer := store.New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + channels.NewRingChannel(10), + false, + pod, + false) + + sslCert := ssl.GetFakeSSLCert(fs) + config := &Configuration{ + FakeCertificate: sslCert, + ListenPorts: &ngx_config.ListenPorts{ + Default: 80, + }, + } + + return &NGINXController{ + store: storer, + cfg: config, + command: NewNginxCommand(), + fileSystem: fs, + } +} + +var oidExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} + +func fakeX509Cert(dnsNames []string) *x509.Certificate { + return &x509.Certificate{ + DNSNames: dnsNames, + Extensions: []pkix.Extension{ + {Id: oidExtensionSubjectAltName}, + }, + } +} diff --git a/internal/ingress/controller/endpoints.go b/internal/ingress/controller/endpoints.go new file mode 100644 index 0000000000..c796ee7321 --- /dev/null +++ b/internal/ingress/controller/endpoints.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "net" + "reflect" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + + "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/k8s" +) + +// getEndpoints returns a list of Endpoint structs for a given service/target port combination. 
+func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Protocol,
+	getServiceEndpoints func(string) (*corev1.Endpoints, error)) []ingress.Endpoint {
+
+	upsServers := []ingress.Endpoint{}
+
+	if s == nil || port == nil {
+		return upsServers
+	}
+
+	// using a map avoids duplicated upstream servers when the service
+	// contains multiple port definitions sharing the same target port
+	processedUpstreamServers := make(map[string]struct{})
+
+	svcKey := k8s.MetaNamespaceKey(s)
+
+	// ExternalName services
+	if s.Spec.Type == corev1.ServiceTypeExternalName {
+		klog.V(3).Infof("Ingress using Service %q of type ExternalName.", svcKey)
+
+		targetPort := port.TargetPort.IntValue()
+		if targetPort <= 0 {
+			klog.Errorf("ExternalName Service %q has an invalid port (%v)", svcKey, targetPort)
+			return upsServers
+		}
+
+		// if the externalName is not an IP address we need to validate that it is a valid FQDN
+		if net.ParseIP(s.Spec.ExternalName) == nil {
+			defaultRetry := wait.Backoff{
+				Steps:    2,
+				Duration: 1 * time.Second,
+				Factor:   1.5,
+				Jitter:   0.2,
+			}
+
+			var lastErr error
+			err := wait.ExponentialBackoff(defaultRetry, func() (bool, error) {
+				_, err := net.LookupHost(s.Spec.ExternalName)
+				if err == nil {
+					return true, nil
+				}
+
+				lastErr = err
+				return false, nil
+			})
+
+			if err != nil {
+				klog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, lastErr)
+				return upsServers
+			}
+		}
+
+		return append(upsServers, ingress.Endpoint{
+			Address: s.Spec.ExternalName,
+			Port:    fmt.Sprintf("%v", targetPort),
+		})
+	}
+
+	klog.V(3).Infof("Getting Endpoints for Service %q and port %v", svcKey, port.String())
+	ep, err := getServiceEndpoints(svcKey)
+	if err != nil {
+		klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
+		return upsServers
+	}
+
+	for _, ss := range ep.Subsets {
+		for _, epPort := range ss.Ports {
+
+			if !reflect.DeepEqual(epPort.Protocol, proto) {
+				continue
+			}
+
+			var targetPort int32
+
+			if port.Name == "" {
+				// port.Name is optional if there is only one port
+				targetPort = epPort.Port
+			} else if port.Name == epPort.Name {
+				targetPort = epPort.Port
+			}
+
+			if targetPort <= 0 {
+				continue
+			}
+
+			for _, epAddress := range ss.Addresses {
+				ep := net.JoinHostPort(epAddress.IP, strconv.Itoa(int(targetPort)))
+				if _, exists := processedUpstreamServers[ep]; exists {
+					continue
+				}
+				ups := ingress.Endpoint{
+					Address: epAddress.IP,
+					Port:    fmt.Sprintf("%v", targetPort),
+					Target:  epAddress.TargetRef,
+				}
+				upsServers = append(upsServers, ups)
+				processedUpstreamServers[ep] = struct{}{}
+			}
+		}
+	}
+
+	klog.V(3).Infof("Endpoints found for Service %q: %v", svcKey, upsServers)
+	return upsServers
+}
diff --git a/internal/ingress/controller/endpoints_test.go b/internal/ingress/controller/endpoints_test.go
new file mode 100644
index 0000000000..44ed8f21de
--- /dev/null
+++ b/internal/ingress/controller/endpoints_test.go
@@ -0,0 +1,394 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"fmt"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/ingress-nginx/internal/ingress"
+)
+
+func TestGetEndpoints(t *testing.T) {
+	tests := []struct {
+		name   string
+		svc    *corev1.Service
+		port   *corev1.ServicePort
+		proto  corev1.Protocol
+		fn     func(string) (*corev1.Endpoints, error)
+		result []ingress.Endpoint
+	}{
+		{
+			"no service should return 0 endpoints",
+			nil,
+			nil,
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return nil, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"no service port should return 0 endpoints",
+			&corev1.Service{},
+			nil,
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return nil, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"a service without endpoints should return 0 endpoints",
+			&corev1.Service{},
+			&corev1.ServicePort{Name: "default"},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return &corev1.Endpoints{}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"a service of type ServiceTypeExternalName with an invalid port should return 0 endpoints",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type: corev1.ServiceTypeExternalName,
+				},
+			},
+			&corev1.ServicePort{Name: "default"},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return &corev1.Endpoints{}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"a service of type ServiceTypeExternalName with a valid port should return one endpoint",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:         corev1.ServiceTypeExternalName,
+					ExternalName: "www.10.0.0.1.xip.io",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return &corev1.Endpoints{}, nil
+			},
+			[]ingress.Endpoint{
+				{
+					Address: "www.10.0.0.1.xip.io",
+					Port:    "80",
+				},
+			},
+		},
+		{
+			"a service of type ServiceTypeExternalName with an unresolvable ExternalName value should return 0 endpoints",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:         corev1.ServiceTypeExternalName,
+					ExternalName: "foo.bar",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return &corev1.Endpoints{}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"should return no endpoints when there is an error searching for endpoints",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				return nil, fmt.Errorf("unexpected error")
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"should return no endpoints when the protocol does not match",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				nodeName := "dummy"
+				return &corev1.Endpoints{
+					Subsets: []corev1.EndpointSubset{
+						{
+							Addresses: []corev1.EndpointAddress{
+								{
+									IP:       "1.1.1.1",
+									NodeName: &nodeName,
+								},
+							},
+							Ports: []corev1.EndpointPort{
+								{
+									Protocol: corev1.ProtocolUDP,
+								},
+							},
+						},
+					},
+				}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"should return no endpoints when there are no ready Addresses",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				nodeName := "dummy"
+				return &corev1.Endpoints{
+					Subsets: []corev1.EndpointSubset{
+						{
+							NotReadyAddresses: []corev1.EndpointAddress{
+								{
+									IP:       "1.1.1.1",
+									NodeName: &nodeName,
+								},
+							},
+							Ports: []corev1.EndpointPort{
+								{
+									Protocol: corev1.ProtocolUDP,
+								},
+							},
+						},
+					},
+				}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"should return no endpoints when the port name does not match any port in the endpoint Subsets",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				nodeName := "dummy"
+				return &corev1.Endpoints{
+					Subsets: []corev1.EndpointSubset{
+						{
+							Addresses: []corev1.EndpointAddress{
+								{
+									IP:       "1.1.1.1",
+									NodeName: &nodeName,
+								},
+							},
+							Ports: []corev1.EndpointPort{
+								{
+									Protocol: corev1.ProtocolTCP,
+									Port:     int32(80),
+									Name:     "another-name",
+								},
+							},
+						},
+					},
+				}, nil
+			},
+			[]ingress.Endpoint{},
+		},
+		{
+			"should return one endpoint when the port name matches a port in the endpoint Subsets",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromInt(80),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "default",
+				TargetPort: intstr.FromInt(80),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				nodeName := "dummy"
+				return &corev1.Endpoints{
+					Subsets: []corev1.EndpointSubset{
+						{
+							Addresses: []corev1.EndpointAddress{
+								{
+									IP:       "1.1.1.1",
+									NodeName: &nodeName,
+								},
+							},
+							Ports: []corev1.EndpointPort{
+								{
+									Protocol: corev1.ProtocolTCP,
+									Port:     int32(80),
+									Name:     "default",
+								},
+							},
+						},
+					},
+				}, nil
+			},
+			[]ingress.Endpoint{
+				{
+					Address: "1.1.1.1",
+					Port:    "80",
+				},
+			},
+		},
+		{
+			"should return one endpoint when the port name matches more than one port in the endpoint Subsets",
+			&corev1.Service{
+				Spec: corev1.ServiceSpec{
+					Type:      corev1.ServiceTypeClusterIP,
+					ClusterIP: "1.1.1.1",
+					Ports: []corev1.ServicePort{
+						{
+							Name:       "default",
+							TargetPort: intstr.FromString("port-1"),
+						},
+					},
+				},
+			},
+			&corev1.ServicePort{
+				Name:       "port-1",
+				TargetPort: intstr.FromString("port-1"),
+			},
+			corev1.ProtocolTCP,
+			func(string) (*corev1.Endpoints, error) {
+				nodeName := "dummy"
+				return &corev1.Endpoints{
+					Subsets: []corev1.EndpointSubset{
+						{
+							Addresses: []corev1.EndpointAddress{
+								{
+									IP:       "1.1.1.1",
+									NodeName: &nodeName,
+								},
+							},
+							Ports: []corev1.EndpointPort{
+								{
+									Name:     "port-1",
+									Protocol: corev1.ProtocolTCP,
+									Port:     80,
+								},
+								{
+									Name:     "port-1",
+									Protocol: corev1.ProtocolTCP,
+									Port:     80,
+								},
+							},
+						},
+					},
+				}, nil
+			},
+			[]ingress.Endpoint{
+				{
+					Address: "1.1.1.1",
+					Port:    "80",
+				},
+			},
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			result := getEndpoints(testCase.svc, testCase.port, testCase.proto, testCase.fn)
+			if len(testCase.result) != len(result) {
+				t.Errorf("Expected %d Endpoints but got %d", len(testCase.result), len(result))
+			}
+		})
+	}
+}
diff --git a/internal/ingress/controller/listers.go b/internal/ingress/controller/listers.go
deleted file mode 100644
index f3edc2f4b6..0000000000
--- a/internal/ingress/controller/listers.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("couldn't get object from tombstone %#v", obj) - return - } - delIng, ok = tombstone.Obj.(*extensions.Ingress) - if !ok { - glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) - return - } - } - if !class.IsValid(delIng) { - glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, class.IngressKey) - return - } - n.recorder.Eventf(delIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", delIng.Namespace, delIng.Name)) - n.listers.IngressAnnotation.Delete(delIng) - n.syncQueue.Enqueue(obj) - }, - UpdateFunc: func(old, cur interface{}) { - oldIng := old.(*extensions.Ingress) - curIng := cur.(*extensions.Ingress) - validOld := class.IsValid(oldIng) - validCur := class.IsValid(curIng) - - c := curIng.GetAnnotations()[class.IngressKey] - if !validOld && validCur { - glog.Infof("creating ingress %v/%v based on annotation %v with value '%v'", curIng.Namespace, curIng.Name, class.IngressKey, c) - n.recorder.Eventf(curIng, apiv1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) - } else if validOld && !validCur { - glog.Infof("removing ingress %v/%v based on annotation %v with value '%v'", curIng.Namespace, curIng.Name, class.IngressKey, c) - n.recorder.Eventf(curIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) - } else if validCur && !reflect.DeepEqual(old, cur) { - n.recorder.Eventf(curIng, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) - } - - n.extractAnnotations(curIng) - n.syncQueue.Enqueue(cur) - }, - } - - secrEventHandler := cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(old, cur interface{}) { - if !reflect.DeepEqual(old, cur) { - sec := cur.(*apiv1.Secret) - key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name) - _, exists := n.sslCertTracker.Get(key) - if exists { - n.syncSecret(key) - } - } - }, - DeleteFunc: func(obj interface{}) { - sec, ok := obj.(*apiv1.Secret) - if !ok { - // If we reached here it means the secret was deleted but its final state is unrecorded. 
- tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("couldn't get object from tombstone %#v", obj) - return - } - sec, ok = tombstone.Obj.(*apiv1.Secret) - if !ok { - glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) - return - } - } - key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name) - n.sslCertTracker.Delete(key) - n.syncQueue.Enqueue(key) - }, - } - - eventHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - n.syncQueue.Enqueue(obj) - }, - DeleteFunc: func(obj interface{}) { - n.syncQueue.Enqueue(obj) - }, - UpdateFunc: func(old, cur interface{}) { - oep := old.(*apiv1.Endpoints) - ocur := cur.(*apiv1.Endpoints) - if !reflect.DeepEqual(ocur.Subsets, oep.Subsets) { - n.syncQueue.Enqueue(cur) - } - }, - } - - mapEventHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - upCmap := obj.(*apiv1.ConfigMap) - mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name) - if mapKey == n.cfg.ConfigMapName { - glog.V(2).Infof("adding configmap %v to backend", mapKey) - n.SetConfig(upCmap) - n.SetForceReload(true) - } - }, - UpdateFunc: func(old, cur interface{}) { - if !reflect.DeepEqual(old, cur) { - upCmap := cur.(*apiv1.ConfigMap) - mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name) - if mapKey == n.cfg.ConfigMapName { - glog.V(2).Infof("updating configmap backend (%v)", mapKey) - n.SetConfig(upCmap) - n.SetForceReload(true) - } - // updates to configuration configmaps can trigger an update - if mapKey == n.cfg.ConfigMapName || mapKey == n.cfg.TCPConfigMapName || mapKey == n.cfg.UDPConfigMapName { - n.recorder.Eventf(upCmap, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", mapKey)) - n.syncQueue.Enqueue(cur) - } - } - }, - } - - watchNs := apiv1.NamespaceAll - if n.cfg.ForceNamespaceIsolation && n.cfg.Namespace != apiv1.NamespaceAll { - watchNs = n.cfg.Namespace - } - - lister := &ingress.StoreLister{} - lister.IngressAnnotation.Store = cache_client.NewStore(cache_client.DeletionHandlingMetaNamespaceKeyFunc) - - controller := &cacheController{} - - lister.Ingress.Store, controller.Ingress = cache.NewInformer( - cache.NewListWatchFromClient(n.cfg.Client.ExtensionsV1beta1().RESTClient(), "ingresses", n.cfg.Namespace, fields.Everything()), - &extensions.Ingress{}, n.cfg.ResyncPeriod, ingEventHandler) - - lister.Endpoint.Store, controller.Endpoint = cache.NewInformer( - cache.NewListWatchFromClient(n.cfg.Client.CoreV1().RESTClient(), "endpoints", n.cfg.Namespace, fields.Everything()), - &apiv1.Endpoints{}, n.cfg.ResyncPeriod, eventHandler) - - lister.Secret.Store, controller.Secret = cache.NewInformer( - cache.NewListWatchFromClient(n.cfg.Client.CoreV1().RESTClient(), "secrets", watchNs, fields.Everything()), - &apiv1.Secret{}, n.cfg.ResyncPeriod, secrEventHandler) - - lister.ConfigMap.Store, controller.Configmap = cache.NewInformer( - cache.NewListWatchFromClient(n.cfg.Client.CoreV1().RESTClient(), "configmaps", watchNs, fields.Everything()), - &apiv1.ConfigMap{}, n.cfg.ResyncPeriod, mapEventHandler) - - lister.Service.Store, controller.Service = cache.NewInformer( - cache.NewListWatchFromClient(n.cfg.Client.CoreV1().RESTClient(), "services", n.cfg.Namespace, fields.Everything()), - &apiv1.Service{}, n.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{}) - - return lister, controller -} diff --git a/internal/ingress/controller/metric/collector/nginx.go b/internal/ingress/controller/metric/collector/nginx.go deleted file mode 100644 index 4ed6db57fc..0000000000 --- 
a/internal/ingress/controller/metric/collector/nginx.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collector - -import ( - "github.com/golang/glog" - - "github.com/prometheus/client_golang/prometheus" -) - -type ( - nginxStatusCollector struct { - scrapeChan chan scrapeRequest - ngxHealthPort int - ngxVtsPath string - data *nginxStatusData - watchNamespace string - ingressClass string - } - - nginxStatusData struct { - active *prometheus.Desc - accepted *prometheus.Desc - handled *prometheus.Desc - requests *prometheus.Desc - reading *prometheus.Desc - writing *prometheus.Desc - waiting *prometheus.Desc - } -) - -// NewNginxStatus returns a new prometheus collector the default nginx status module -func NewNginxStatus(watchNamespace, ingressClass string, ngxHealthPort int, ngxVtsPath string) Stopable { - - p := nginxStatusCollector{ - scrapeChan: make(chan scrapeRequest), - ngxHealthPort: ngxHealthPort, - ngxVtsPath: ngxVtsPath, - watchNamespace: watchNamespace, - ingressClass: ingressClass, - } - - p.data = &nginxStatusData{ - active: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "active_connections"), - "total number of active connections", - []string{"ingress_class", "namespace"}, nil), - - accepted: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "accepted_connections"), - "total number of accepted client connections", - []string{"ingress_class", "namespace"}, nil), - - handled: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "handled_connections"), - "total number of handled connections", - []string{"ingress_class", "namespace"}, nil), - - requests: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "total_requests"), - "total number of client requests", - []string{"ingress_class", "namespace"}, nil), - - reading: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "current_reading_connections"), - "current number of connections where nginx is reading the request header", - []string{"ingress_class", "namespace"}, nil), - - writing: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "current_writing_connections"), - "current number of connections where nginx is writing the response back to the client", - []string{"ingress_class", "namespace"}, nil), - - waiting: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "current_waiting_connections"), - "current number of idle client connections waiting for a request", - []string{"ingress_class", "namespace"}, nil), - } - - go p.start() - - return p -} - -// Describe implements prometheus.Collector. -func (p nginxStatusCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- p.data.active - ch <- p.data.accepted - ch <- p.data.handled - ch <- p.data.requests - ch <- p.data.reading - ch <- p.data.writing - ch <- p.data.waiting -} - -// Collect implements prometheus.Collector. 
-func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) { - req := scrapeRequest{results: ch, done: make(chan struct{})} - p.scrapeChan <- req - <-req.done -} - -func (p nginxStatusCollector) start() { - for req := range p.scrapeChan { - ch := req.results - p.scrape(ch) - req.done <- struct{}{} - } -} - -func (p nginxStatusCollector) Stop() { - close(p.scrapeChan) -} - -// nginxStatusCollector scrape the nginx status -func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { - s, err := getNginxStatus(p.ngxHealthPort, p.ngxVtsPath) - if err != nil { - glog.Warningf("unexpected error obtaining nginx status info: %v", err) - return - } - - ch <- prometheus.MustNewConstMetric(p.data.active, - prometheus.GaugeValue, float64(s.Active), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.accepted, - prometheus.GaugeValue, float64(s.Accepted), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.handled, - prometheus.GaugeValue, float64(s.Handled), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.requests, - prometheus.GaugeValue, float64(s.Requests), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.reading, - prometheus.GaugeValue, float64(s.Reading), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.writing, - prometheus.GaugeValue, float64(s.Writing), p.ingressClass, p.watchNamespace) - ch <- prometheus.MustNewConstMetric(p.data.waiting, - prometheus.GaugeValue, float64(s.Waiting), p.ingressClass, p.watchNamespace) -} diff --git a/internal/ingress/controller/metric/collector/process.go b/internal/ingress/controller/metric/collector/process.go deleted file mode 100644 index 0167985254..0000000000 --- a/internal/ingress/controller/metric/collector/process.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collector - -import ( - "path/filepath" - - "github.com/golang/glog" - - common "github.com/ncabatoff/process-exporter" - "github.com/ncabatoff/process-exporter/proc" - "github.com/prometheus/client_golang/prometheus" -) - -// BinaryNameMatcher ... -type BinaryNameMatcher struct { - Name string - Binary string -} - -// MatchAndName returns false if the match failed, otherwise -// true and the resulting name. 
-func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) { - if len(nacl.Cmdline) == 0 { - return false, "" - } - cmd := filepath.Base(em.Binary) - return em.Name == cmd, "" -} - -type namedProcessData struct { - numProcs *prometheus.Desc - cpuSecs *prometheus.Desc - readBytes *prometheus.Desc - writeBytes *prometheus.Desc - memResidentbytes *prometheus.Desc - memVirtualbytes *prometheus.Desc - startTime *prometheus.Desc -} - -type namedProcess struct { - *proc.Grouper - - scrapeChan chan scrapeRequest - fs *proc.FS - data namedProcessData -} - -// NewNamedProcess returns a new prometheus collector for the nginx process -func NewNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) { - fs, err := proc.NewFS("/proc") - if err != nil { - return nil, err - } - p := namedProcess{ - scrapeChan: make(chan scrapeRequest), - Grouper: proc.NewGrouper(children, mn), - fs: fs, - } - _, err = p.Update(p.fs.AllProcs()) - if err != nil { - return nil, err - } - - p.data = namedProcessData{ - numProcs: prometheus.NewDesc( - "num_procs", - "number of processes", - nil, nil), - - cpuSecs: prometheus.NewDesc( - "cpu_seconds_total", - "Cpu usage in seconds", - nil, nil), - - readBytes: prometheus.NewDesc( - "read_bytes_total", - "number of bytes read", - nil, nil), - - writeBytes: prometheus.NewDesc( - "write_bytes_total", - "number of bytes written", - nil, nil), - - memResidentbytes: prometheus.NewDesc( - "resident_memory_bytes", - "number of bytes of memory in use", - nil, nil), - - memVirtualbytes: prometheus.NewDesc( - "virtual_memory_bytes", - "number of bytes of memory in use", - nil, nil), - - startTime: prometheus.NewDesc( - "oldest_start_time_seconds", - "start time in seconds since 1970/01/01", - nil, nil), - } - - go p.start() - - return p, nil -} - -// Describe implements prometheus.Collector. -func (p namedProcess) Describe(ch chan<- *prometheus.Desc) { - ch <- p.data.cpuSecs - ch <- p.data.numProcs - ch <- p.data.readBytes - ch <- p.data.writeBytes - ch <- p.data.memResidentbytes - ch <- p.data.memVirtualbytes - ch <- p.data.startTime -} - -// Collect implements prometheus.Collector. 
-func (p namedProcess) Collect(ch chan<- prometheus.Metric) { - req := scrapeRequest{results: ch, done: make(chan struct{})} - p.scrapeChan <- req - <-req.done -} - -func (p namedProcess) start() { - for req := range p.scrapeChan { - ch := req.results - p.scrape(ch) - req.done <- struct{}{} - } -} - -func (p namedProcess) Stop() { - close(p.scrapeChan) -} - -func (p namedProcess) scrape(ch chan<- prometheus.Metric) { - _, err := p.Update(p.fs.AllProcs()) - if err != nil { - glog.Warningf("unexpected error obtaining nginx process info: %v", err) - return - } - - for _, gcounts := range p.Groups() { - ch <- prometheus.MustNewConstMetric(p.data.numProcs, - prometheus.GaugeValue, float64(gcounts.Procs)) - ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes, - prometheus.GaugeValue, float64(gcounts.Memresident)) - ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes, - prometheus.GaugeValue, float64(gcounts.Memvirtual)) - ch <- prometheus.MustNewConstMetric(p.data.startTime, - prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) - ch <- prometheus.MustNewConstMetric(p.data.cpuSecs, - prometheus.CounterValue, gcounts.Cpu) - ch <- prometheus.MustNewConstMetric(p.data.readBytes, - prometheus.CounterValue, float64(gcounts.ReadBytes)) - ch <- prometheus.MustNewConstMetric(p.data.writeBytes, - prometheus.CounterValue, float64(gcounts.WriteBytes)) - } -} diff --git a/internal/ingress/controller/metric/collector/scrape.go b/internal/ingress/controller/metric/collector/scrape.go deleted file mode 100644 index a078b28597..0000000000 --- a/internal/ingress/controller/metric/collector/scrape.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collector - -import "github.com/prometheus/client_golang/prometheus" - -// Stopable defines a prometheus collector that can be stopped -type Stopable interface { - prometheus.Collector - Stop() -} - -type scrapeRequest struct { - results chan<- prometheus.Metric - done chan struct{} -} diff --git a/internal/ingress/controller/metric/collector/status.go b/internal/ingress/controller/metric/collector/status.go deleted file mode 100644 index e195b045da..0000000000 --- a/internal/ingress/controller/metric/collector/status.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collector - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - - "github.com/golang/glog" -) - -var ( - ac = regexp.MustCompile(`Active connections: (\d+)`) - sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) - reading = regexp.MustCompile(`Reading: (\d+)`) - writing = regexp.MustCompile(`Writing: (\d+)`) - waiting = regexp.MustCompile(`Waiting: (\d+)`) -) - -type basicStatus struct { - // Active total number of active connections - Active int - // Accepted total number of accepted client connections - Accepted int - // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit). - Handled int - // Requests total number of client requests. - Requests int - // Reading current number of connections where nginx is reading the request header. - Reading int - // Writing current number of connections where nginx is writing the response back to the client. - Writing int - // Waiting current number of idle client connections waiting for a request. - Waiting int -} - -// https://github.com/vozlt/nginx-module-vts -type vts struct { - NginxVersion string `json:"nginxVersion"` - LoadMsec int `json:"loadMsec"` - NowMsec int `json:"nowMsec"` - // Total connections and requests(same as stub_status_module in NGINX) - Connections connections `json:"connections"` - // Traffic(in/out) and request and response counts and cache hit ratio per each server zone - ServerZones map[string]serverZone `json:"serverZones"` - // Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through - // the vhost_traffic_status_filter_by_set_key directive - FilterZones map[string]map[string]filterZone `json:"filterZones"` - // Traffic(in/out) and request and response counts per server in each upstream group - UpstreamZones map[string][]upstreamZone `json:"upstreamZones"` -} - -type serverZone struct { - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - Responses response `json:"responses"` - Cache cache `json:"cache"` -} - -type filterZone struct { - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - Cache cache `json:"cache"` - Responses response `json:"responses"` -} - -type upstreamZone struct { - Responses response `json:"responses"` - Server string `json:"server"` - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - ResponseMsec float64 `json:"responseMsec"` - Weight float64 `json:"weight"` - MaxFails float64 `json:"maxFails"` - FailTimeout float64 `json:"failTimeout"` - Backup BoolToFloat64 `json:"backup"` - Down BoolToFloat64 `json:"down"` -} - -type cache struct { - Miss float64 `json:"miss"` - Bypass float64 `json:"bypass"` - Expired float64 `json:"expired"` - Stale float64 `json:"stale"` - Updating float64 `json:"updating"` - Revalidated float64 `json:"revalidated"` - Hit float64 `json:"hit"` - Scarce float64 `json:"scarce"` -} - -type response struct { - OneXx float64 `json:"1xx"` - TwoXx float64 `json:"2xx"` - TheeXx float64 `json:"3xx"` - FourXx float64 `json:"4xx"` - FiveXx float64 `json:"5xx"` -} - -type connections struct { - Active float64 `json:"active"` - Reading float64 `json:"reading"` - Writing float64 `json:"writing"` - Waiting float64 `json:"waiting"` - Accepted 
float64 `json:"accepted"` - Handled float64 `json:"handled"` - Requests float64 `json:"requests"` -} - -// BoolToFloat64 ... -type BoolToFloat64 float64 - -// UnmarshalJSON ... -func (bit BoolToFloat64) UnmarshalJSON(data []byte) error { - asString := string(data) - if asString == "1" || asString == "true" { - bit = 1 - } else if asString == "0" || asString == "false" { - bit = 0 - } else { - return fmt.Errorf(fmt.Sprintf("boolean unmarshal error: invalid input %s", asString)) - } - return nil -} - -func getNginxStatus(port int, path string) (*basicStatus, error) { - url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - glog.V(3).Infof("start scraping url: %v", url) - - data, err := httpBody(url) - - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err) - } - - return parse(string(data)), nil -} - -func httpBody(url string) ([]byte, error) { - resp, err := http.DefaultClient.Get(url) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx : %v", err) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err) - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode) - } - - return data, nil -} - -func getNginxVtsMetrics(port int, path string) (*vts, error) { - url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - glog.V(3).Infof("start scraping url: %v", url) - - data, err := httpBody(url) - - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err) - } - - var vts *vts - err = json.Unmarshal(data, &vts) - if err != nil { - return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err) - } - glog.V(3).Infof("scrape returned : %v", vts) - return vts, nil -} - -func parse(data string) *basicStatus { - acr := ac.FindStringSubmatch(data) - sahrr := sahr.FindStringSubmatch(data) - readingr := reading.FindStringSubmatch(data) - writingr := writing.FindStringSubmatch(data) - waitingr := waiting.FindStringSubmatch(data) - - return &basicStatus{ - toInt(acr, 1), - toInt(sahrr, 1), - toInt(sahrr, 2), - toInt(sahrr, 3), - toInt(readingr, 1), - toInt(writingr, 1), - toInt(waitingr, 1), - } -} - -func toInt(data []string, pos int) int { - if len(data) == 0 { - return 0 - } - if pos > len(data) { - return 0 - } - if v, err := strconv.Atoi(data[pos]); err == nil { - return v - } - return 0 -} diff --git a/internal/ingress/controller/metric/collector/status_test.go b/internal/ingress/controller/metric/collector/status_test.go deleted file mode 100644 index 5d3075dae7..0000000000 --- a/internal/ingress/controller/metric/collector/status_test.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collector - -import ( - "testing" - - "github.com/kylelemons/godebug/pretty" -) - -func TestParseStatus(t *testing.T) { - tests := []struct { - in string - out *basicStatus - }{ - {`Active connections: 43 -server accepts handled requests - 7368 7368 10993 -Reading: 0 Writing: 5 Waiting: 38`, - &basicStatus{43, 7368, 7368, 10993, 0, 5, 38}, - }, - {`Active connections: 0 -server accepts handled requests - 1 7 0 -Reading: A Writing: B Waiting: 38`, - &basicStatus{0, 1, 7, 0, 0, 0, 38}, - }, - } - - for _, test := range tests { - r := parse(test.in) - if diff := pretty.Compare(r, test.out); diff != "" { - t.Logf("%v", diff) - t.Fatalf("expected %v but returned %v", test.out, r) - } - } -} - -func TestToint(t *testing.T) { - tests := []struct { - in []string - pos int - exp int - }{ - {[]string{}, 0, 0}, - {[]string{}, 1, 0}, - {[]string{"A"}, 0, 0}, - {[]string{"1"}, 0, 1}, - {[]string{"a", "2"}, 1, 2}, - } - - for _, test := range tests { - v := toInt(test.in, test.pos) - if v != test.exp { - t.Fatalf("expected %v but returned %v", test.exp, v) - } - } -} diff --git a/internal/ingress/controller/metric/collector/vts.go b/internal/ingress/controller/metric/collector/vts.go deleted file mode 100644 index 33eeac492c..0000000000 --- a/internal/ingress/controller/metric/collector/vts.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collector - -import ( - "reflect" - - "github.com/golang/glog" - - "github.com/prometheus/client_golang/prometheus" -) - -const ns = "nginx" - -type ( - vtsCollector struct { - scrapeChan chan scrapeRequest - port int - path string - data *vtsData - watchNamespace string - ingressClass string - } - - vtsData struct { - bytes *prometheus.Desc - cache *prometheus.Desc - connections *prometheus.Desc - responses *prometheus.Desc - requests *prometheus.Desc - filterZoneBytes *prometheus.Desc - filterZoneResponses *prometheus.Desc - filterZoneCache *prometheus.Desc - upstreamBackup *prometheus.Desc - upstreamBytes *prometheus.Desc - upstreamDown *prometheus.Desc - upstreamFailTimeout *prometheus.Desc - upstreamMaxFails *prometheus.Desc - upstreamResponses *prometheus.Desc - upstreamRequests *prometheus.Desc - upstreamResponseMsec *prometheus.Desc - upstreamWeight *prometheus.Desc - } -) - -// NewNGINXVTSCollector returns a new prometheus collector for the VTS module -func NewNGINXVTSCollector(watchNamespace, ingressClass string, port int, path string) Stopable { - - p := vtsCollector{ - scrapeChan: make(chan scrapeRequest), - port: port, - path: path, - watchNamespace: watchNamespace, - ingressClass: ingressClass, - } - - p.data = &vtsData{ - bytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "bytes_total"), - "Nginx bytes count", - []string{"ingress_class", "namespace", "server_zone", "direction"}, nil), - - cache: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "cache_total"), - "Nginx cache count", - []string{"ingress_class", "namespace", "server_zone", "type"}, nil), - - connections: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "connections_total"), - "Nginx connections count", - []string{"ingress_class", "namespace", "type"}, nil), - - responses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "responses_total"), - "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "server_zone", "status_code"}, nil), - - requests: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "requests_total"), - "The total number of requested client connections.", - []string{"ingress_class", "namespace", "server_zone"}, nil), - - filterZoneBytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_bytes_total"), - "Nginx bytes count", - []string{"ingress_class", "namespace", "server_zone", "key", "direction"}, nil), - - filterZoneResponses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_responses_total"), - "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "server_zone", "key", "status_code"}, nil), - - filterZoneCache: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_cache_total"), - "Nginx cache count", - []string{"ingress_class", "namespace", "server_zone", "key", "type"}, nil), - - upstreamBackup: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_backup"), - "Current backup setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamBytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_bytes_total"), - "The total number of bytes sent to this server.", - []string{"ingress_class", "namespace", "upstream", "server", "direction"}, nil), - - upstreamDown: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "vts_upstream_down_total"), - "Current down setting of the server.", - []string{"ingress_class", 
"namespace", "upstream", "server"}, nil), - - upstreamFailTimeout: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_fail_timeout"), - "Current fail_timeout setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamMaxFails: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_maxfails"), - "Current max_fails setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamResponses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_responses_total"), - "The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "upstream", "server", "status_code"}, nil), - - upstreamRequests: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_requests_total"), - "The total number of client connections forwarded to this server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamResponseMsec: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_response_msecs_avg"), - "The average of only upstream response processing times in milliseconds.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamWeight: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_weight"), - "Current upstream weight setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - } - - go p.start() - - return p -} - -// Describe implements prometheus.Collector. -func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- p.data.bytes - ch <- p.data.cache - ch <- p.data.connections - ch <- p.data.requests - ch <- p.data.responses - ch <- p.data.upstreamBackup - ch <- p.data.upstreamBytes - ch <- p.data.upstreamDown - ch <- p.data.upstreamFailTimeout - ch <- p.data.upstreamMaxFails - ch <- p.data.upstreamRequests - ch <- p.data.upstreamResponseMsec - ch <- p.data.upstreamResponses - ch <- p.data.upstreamWeight - ch <- p.data.filterZoneBytes - ch <- p.data.filterZoneCache - ch <- p.data.filterZoneResponses -} - -// Collect implements prometheus.Collector. 
-func (p vtsCollector) Collect(ch chan<- prometheus.Metric) { - req := scrapeRequest{results: ch, done: make(chan struct{})} - p.scrapeChan <- req - <-req.done -} - -func (p vtsCollector) start() { - for req := range p.scrapeChan { - ch := req.results - p.scrapeVts(ch) - req.done <- struct{}{} - } -} - -func (p vtsCollector) Stop() { - close(p.scrapeChan) -} - -// scrapeVts scrape nginx vts metrics -func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { - nginxMetrics, err := getNginxVtsMetrics(p.port, p.path) - if err != nil { - glog.Warningf("unexpected error obtaining nginx status info: %v", err) - return - } - - reflectMetrics(&nginxMetrics.Connections, p.data.connections, ch, p.ingressClass, p.watchNamespace) - - for name, zones := range nginxMetrics.UpstreamZones { - for pos, value := range zones { - reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, p.ingressClass, p.watchNamespace, name, value.Server) - - ch <- prometheus.MustNewConstMetric(p.data.upstreamRequests, - prometheus.CounterValue, zones[pos].RequestCounter, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamDown, - prometheus.CounterValue, float64(zones[pos].Down), p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamWeight, - prometheus.CounterValue, zones[pos].Weight, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamResponseMsec, - prometheus.CounterValue, zones[pos].ResponseMsec, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamBackup, - prometheus.CounterValue, float64(zones[pos].Backup), p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamFailTimeout, - prometheus.CounterValue, zones[pos].FailTimeout, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamMaxFails, - prometheus.CounterValue, zones[pos].MaxFails, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, - prometheus.CounterValue, zones[pos].InBytes, p.ingressClass, p.watchNamespace, name, value.Server, "in") - ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, - prometheus.CounterValue, zones[pos].OutBytes, p.ingressClass, p.watchNamespace, name, value.Server, "out") - } - } - - for name, zone := range nginxMetrics.ServerZones { - reflectMetrics(&zone.Responses, p.data.responses, ch, p.ingressClass, p.watchNamespace, name) - reflectMetrics(&zone.Cache, p.data.cache, ch, p.ingressClass, p.watchNamespace, name) - - ch <- prometheus.MustNewConstMetric(p.data.requests, - prometheus.CounterValue, zone.RequestCounter, p.ingressClass, p.watchNamespace, name) - ch <- prometheus.MustNewConstMetric(p.data.bytes, - prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, name, "in") - ch <- prometheus.MustNewConstMetric(p.data.bytes, - prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, name, "out") - } - - for serverZone, keys := range nginxMetrics.FilterZones { - for name, zone := range keys { - reflectMetrics(&zone.Responses, p.data.filterZoneResponses, ch, p.ingressClass, p.watchNamespace, serverZone, name) - reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, p.ingressClass, p.watchNamespace, serverZone, name) - - ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, 
zone.InBytes, p.ingressClass, p.watchNamespace, serverZone, name, "in") - ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, serverZone, name, "out") - } - } -} - -func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) { - val := reflect.ValueOf(value).Elem() - - for i := 0; i < val.NumField(); i++ { - tag := val.Type().Field(i).Tag - l := append(labels, tag.Get("json")) - ch <- prometheus.MustNewConstMetric(desc, - prometheus.CounterValue, val.Field(i).Interface().(float64), - l...) - } -} diff --git a/internal/ingress/controller/metrics.go b/internal/ingress/controller/metrics.go deleted file mode 100644 index 9a311563d2..0000000000 --- a/internal/ingress/controller/metrics.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "github.com/prometheus/client_golang/prometheus" - - "k8s.io/ingress-nginx/internal/ingress" -) - -const ( - ns = "ingress_controller" - operation = "count" - reloadLabel = "reloads" - sslLabelExpire = "ssl_expire_time_seconds" - sslLabelHost = "host" -) - -func init() { - prometheus.MustRegister(reloadOperation) - prometheus.MustRegister(reloadOperationErrors) - prometheus.MustRegister(sslExpireTime) -} - -var ( - reloadOperation = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: ns, - Name: "success", - Help: "Cumulative number of Ingress controller reload operations", - }, - []string{operation}, - ) - reloadOperationErrors = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: ns, - Name: "errors", - Help: "Cumulative number of Ingress controller errors during reload operations", - }, - []string{operation}, - ) - sslExpireTime = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: ns, - Name: sslLabelExpire, - Help: "Number of seconds since 1970 to the SSL Certificate expire. 
An example to check if this " + - "certificate will expire in 10 days is: \"ingress_controller_ssl_expire_time_seconds < (time() + (10 * 24 * 3600))\"", - }, - []string{sslLabelHost}, - ) -) - -func incReloadCount() { - reloadOperation.WithLabelValues(reloadLabel).Inc() -} - -func incReloadErrorCount() { - reloadOperationErrors.WithLabelValues(reloadLabel).Inc() -} - -func setSSLExpireTime(servers []*ingress.Server) { - for _, s := range servers { - if s.Hostname != defServerName { - sslExpireTime.WithLabelValues(s.Hostname).Set(float64(s.SSLExpireTime.Unix())) - } - } -} diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index 1798c733cf..4d32cc73f6 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -18,283 +18,340 @@ package controller import ( "bytes" - "encoding/base64" + "encoding/json" "errors" "fmt" "io/ioutil" + "math" "net" + "net/http" "os" "os/exec" - "runtime" + "path/filepath" "strconv" "strings" "sync" + "sync/atomic" "syscall" + "text/template" "time" - "github.com/golang/glog" - + proxyproto "github.com/armon/go-proxyproto" + "github.com/eapache/channels" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/filesystem" + adm_controler "k8s.io/ingress-nginx/internal/admission/controller" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" - "k8s.io/ingress-nginx/internal/ingress/annotations" "k8s.io/ingress-nginx/internal/ingress/annotations/class" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" "k8s.io/ingress-nginx/internal/ingress/controller/process" + "k8s.io/ingress-nginx/internal/ingress/controller/store" ngx_template "k8s.io/ingress-nginx/internal/ingress/controller/template" - "k8s.io/ingress-nginx/internal/ingress/defaults" + "k8s.io/ingress-nginx/internal/ingress/metric" "k8s.io/ingress-nginx/internal/ingress/status" - "k8s.io/ingress-nginx/internal/ingress/store" + "k8s.io/ingress-nginx/internal/k8s" ing_net "k8s.io/ingress-nginx/internal/net" "k8s.io/ingress-nginx/internal/net/dns" "k8s.io/ingress-nginx/internal/net/ssl" + "k8s.io/ingress-nginx/internal/nginx" "k8s.io/ingress-nginx/internal/task" "k8s.io/ingress-nginx/internal/watch" ) -type statusModule string - const ( - ngxHealthPath = "/healthz" - - defaultStatusModule statusModule = "default" - vtsStatusModule statusModule = "vts" + tempNginxPattern = "nginx-cfg" ) var ( - tmplPath = "/etc/nginx/template/nginx.tmpl" - cfgPath = "/etc/nginx/nginx.conf" - nginxBinary = "/usr/sbin/nginx" + tmplPath = "/etc/nginx/template/nginx.tmpl" ) // NewNGINXController creates a new NGINX Ingress controller. 
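+// In addition to the Configuration, it receives a metric.Collector used to
+// expose controller metrics and a file.Filesystem abstraction so tests can
+// run against an in-memory filesystem (see newNGINXController in the tests).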
-// If the environment variable NGINX_BINARY exists it will be used -// as source for nginx commands -func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController { - ngx := os.Getenv("NGINX_BINARY") - if ngx == "" { - ngx = nginxBinary - } - +func NewNGINXController(config *Configuration, mc metric.Collector, fs file.Filesystem) *NGINXController { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ Interface: config.Client.CoreV1().Events(config.Namespace), }) h, err := dns.GetSystemNameServers() if err != nil { - glog.Warningf("unexpected error reading system nameservers: %v", err) + klog.Warningf("Error reading system nameservers: %v", err) } n := &NGINXController{ - backendDefaults: ngx_config.NewDefault().Backend, - binary: ngx, - - configmap: &apiv1.ConfigMap{}, - isIPV6Enabled: ing_net.IsIPv6Enabled(), resolver: h, cfg: config, - sslCertTracker: store.NewSSLCertTracker(), - syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(0.3, 1), + syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(config.SyncRateLimit, 1), recorder: eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{ Component: "nginx-ingress-controller", }), stopCh: make(chan struct{}), + updateCh: channels.NewRingChannel(1024), + stopLock: &sync.Mutex{}, fileSystem: fs, - // create an empty configuration. - runningConfig: &ingress.Configuration{}, + runningConfig: new(ingress.Configuration), + + Proxy: &TCPProxy{}, + + metricCollector: mc, + + command: NewNginxCommand(), } - n.listers, n.controllers = n.createListers(n.stopCh) + if n.cfg.ValidationWebhook != "" { + n.validationWebhookServer = &http.Server{ + Addr: config.ValidationWebhook, + Handler: adm_controler.NewAdmissionControllerServer(&adm_controler.IngressAdmission{Checker: n}), + TLSConfig: ssl.NewTLSListener(n.cfg.ValidationWebhookCertPath, n.cfg.ValidationWebhookKeyPath).TLSConfig(), + } + } - n.stats = newStatsCollector(config.Namespace, class.IngressClass, n.binary, n.cfg.ListenPorts.Status) + pod, err := k8s.GetPodDetails(config.Client) + if err != nil { + klog.Fatalf("unexpected error obtaining pod information: %v", err) + } + n.podInfo = pod + + n.store = store.New( + config.EnableSSLChainCompletion, + config.Namespace, + config.ConfigMapName, + config.TCPConfigMapName, + config.UDPConfigMapName, + config.DefaultSSLCertificate, + config.ResyncPeriod, + config.Client, + fs, + n.updateCh, + config.DynamicCertificatesEnabled, + pod, + config.DisableCatchAll) n.syncQueue = task.NewTaskQueue(n.syncIngress) - n.annotations = annotations.NewAnnotationExtractor(n) - if config.UpdateStatus { - n.syncStatus = status.NewStatusSyncer(status.Config{ + n.syncStatus = status.NewStatusSyncer(pod, status.Config{ Client: config.Client, PublishService: config.PublishService, - IngressLister: n.listers.Ingress, - ElectionID: config.ElectionID, - IngressClass: class.IngressClass, - DefaultIngressClass: class.DefaultClass, + PublishStatusAddress: config.PublishStatusAddress, + IngressLister: n.store, UpdateStatusOnShutdown: config.UpdateStatusOnShutdown, UseNodeInternalIP: config.UseNodeInternalIP, }) } else { - glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)") + klog.Warning("Update of Ingress status is disabled (flag --update-status)") } - var onChange func() - onChange = func() { + onTemplateChange := func() { template, err := 
ngx_template.NewTemplate(tmplPath, fs)
		if err != nil {
			// this error is different from the rest because it must be clear why nginx is not working
-			glog.Errorf(`
+			klog.Errorf(`
-------------------------------------------------------------------------------
-Error loading new template : %v
+Error loading new template: %v
-------------------------------------------------------------------------------
`, err)
			return
		}

		n.t = template
-		glog.Info("new NGINX template loaded")
-		n.SetForceReload(true)
+		klog.Info("New NGINX configuration template loaded.")
+		n.syncQueue.EnqueueTask(task.GetDummyObject("template-change"))
	}

	ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
	if err != nil {
-		glog.Fatalf("invalid NGINX template: %v", err)
+		klog.Fatalf("Invalid NGINX configuration template: %v", err)
	}

	n.t = ngxTpl

-	// TODO: refactor
	if _, ok := fs.(filesystem.DefaultFs); !ok {
-		watch.NewDummyFileWatcher(tmplPath, onChange)
-	} else {
-		_, err = watch.NewFileWatcher(tmplPath, onChange)
+		// do not setup watchers on tests
+		return n
+	}
+
+	_, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
+	if err != nil {
+		klog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
+	}
+
+	filesToWatch := []string{}
+	err = filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		filesToWatch = append(filesToWatch, path)
+		return nil
+	})
+
+	if err != nil {
+		klog.Fatalf("Error creating file watchers: %v", err)
+	}
+
+	for _, f := range filesToWatch {
+		_, err = watch.NewFileWatcher(f, func() {
+			klog.Infof("File %v changed. Reloading NGINX", f)
+			n.syncQueue.EnqueueTask(task.GetDummyObject("file-change"))
+		})
		if err != nil {
-			glog.Fatalf("unexpected error watching template %v: %v", tmplPath, err)
+			klog.Fatalf("Error creating file watcher for %v: %v", f, err)
		}
	}

	return n
}

-// NGINXController ...
+// NGINXController describes an NGINX Ingress controller.
type NGINXController struct {
-	cfg *Configuration
-
-	listers     *ingress.StoreLister
-	controllers *cacheController
+	podInfo *k8s.PodInfo

-	annotations annotations.Extractor
+	cfg *Configuration

	recorder record.EventRecorder

	syncQueue *task.Queue

-	syncStatus status.Sync
-
-	// local store of SSL certificates
-	// (only certificates used in ingress)
-	sslCertTracker *store.SSLCertTracker
+	syncStatus status.Syncer

	syncRateLimiter flowcontrol.RateLimiter

-	// stopLock is used to enforce only a single call to Stop is active.
-	// Needed because we allow stopping through an http endpoint and
+	// stopLock is used to enforce that only a single call to Stop is
+	// active at a given time. We allow stopping through an HTTP endpoint and
	// allowing concurrent stoppers leads to stack traces.
stopLock *sync.Mutex - stopCh chan struct{} + stopCh chan struct{} + updateCh *channels.RingChannel - // ngxErrCh channel used to detect errors with the nginx processes + // ngxErrCh is used to detect errors with the NGINX processes ngxErrCh chan error // runningConfig contains the running configuration in the Backend runningConfig *ingress.Configuration - forceReload int32 - - t *ngx_template.Template - - configmap *apiv1.ConfigMap + t ngx_template.TemplateWriter - binary string resolver []net.IP - stats *statsCollector - statusModule statusModule - - // returns true if IPV6 is enabled in the pod isIPV6Enabled bool - // returns true if proxy protocol es enabled - IsProxyProtocolEnabled bool - - isSSLPassthroughEnabled bool - isShuttingDown bool Proxy *TCPProxy - backendDefaults defaults.Backend + store store.Storer fileSystem filesystem.Filesystem -} -// Start start a new NGINX master process running in foreground. -func (n *NGINXController) Start() { - glog.Infof("starting Ingress controller") + metricCollector metric.Collector - n.controllers.Run(n.stopCh) + currentLeader uint32 - // initial sync of secrets to avoid unnecessary reloads - glog.Info("running initial sync of secrets") - for _, obj := range n.listers.Ingress.List() { - ing := obj.(*extensions.Ingress) + validationWebhookServer *http.Server - if !class.IsValid(ing) { - a := ing.GetAnnotations()[class.IngressKey] - glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) - continue - } + command NginxExecTester +} - n.readSecrets(ing) - } +// Start starts a new NGINX master process running in the foreground. +func (n *NGINXController) Start() { + klog.Info("Starting NGINX Ingress controller") - if n.cfg.EnableSSLChainCompletion { - go wait.Until(n.checkSSLChainIssues, 60*time.Second, n.stopCh) - } + n.store.Run(n.stopCh) - if n.syncStatus != nil { - go n.syncStatus.Run(n.stopCh) + // we need to use the defined ingress class to allow multiple leaders + // in order to update information about ingress status + electionID := fmt.Sprintf("%v-%v", n.cfg.ElectionID, class.DefaultClass) + if class.IngressClass != "" { + electionID = fmt.Sprintf("%v-%v", n.cfg.ElectionID, class.IngressClass) } - go wait.Until(n.checkMissingSecrets, 30*time.Second, n.stopCh) + setupLeaderElection(&leaderElectionConfig{ + Client: n.cfg.Client, + ElectionID: electionID, + OnStartedLeading: func(stopCh chan struct{}) { + if n.syncStatus != nil { + go n.syncStatus.Run(stopCh) + } + + n.setLeader(true) + n.metricCollector.OnStartedLeading(electionID) + // manually update SSL expiration metrics + // (to not wait for a reload) + n.metricCollector.SetSSLExpireTime(n.runningConfig.Servers) + }, + OnStoppedLeading: func() { + n.setLeader(false) + n.metricCollector.OnStoppedLeading(electionID) + }, + PodName: n.podInfo.Name, + PodNamespace: n.podInfo.Namespace, + }) - done := make(chan error, 1) - cmd := exec.Command(n.binary, "-c", cfgPath) + cmd := n.command.ExecCommand() - // put nginx in another process group to prevent it + // put NGINX in another process group to prevent it // to receive signals meant for the controller cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, Pgid: 0, } - glog.Info("starting NGINX process...") + if n.cfg.EnableSSLPassthrough { + n.setupSSLProxy() + } + + klog.Info("Starting NGINX process") n.start(cmd) go n.syncQueue.Run(time.Second, n.stopCh) // force initial sync - n.syncQueue.Enqueue(&extensions.Ingress{}) + n.syncQueue.EnqueueTask(task.GetDummyObject("initial-sync")) + 
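Start's select loop below consumes store events from updateCh, the ring channel created in NewNGINXController with capacity 1024. A small sketch of the drop-oldest semantics that buys, assuming github.com/eapache/channels (the capacity is shrunk here to make eviction visible):

package main

import (
	"fmt"

	"github.com/eapache/channels"
)

func main() {
	// Same type as n.updateCh, but with a tiny buffer for the demo.
	ch := channels.NewRingChannel(2)

	for i := 0; i < 5; i++ {
		ch.In() <- i // never blocks; when full, the oldest values are evicted
	}
	ch.Close()

	// After Close, Out() drains what survived; with capacity 2, mostly the
	// newest events remain (exact survivors depend on goroutine timing).
	for v := range ch.Out() {
		fmt.Println(v)
	}
}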
+	// In case of error the temporary configuration file will
+	// be available up to five minutes after the error
+	go func() {
+		for {
+			time.Sleep(5 * time.Minute)
+			err := cleanTempNginxCfg()
+			if err != nil {
+				klog.Infof("Unexpected error removing temporary configuration files: %v", err)
+			}
+		}
+	}()
+
+	if n.validationWebhookServer != nil {
+		klog.Infof("Starting validation webhook on %s with keys %s %s", n.validationWebhookServer.Addr, n.cfg.ValidationWebhookCertPath, n.cfg.ValidationWebhookKeyPath)
+		go func() {
+			klog.Error(n.validationWebhookServer.ListenAndServeTLS("", ""))
+		}()
+	}

	for {
		select {
-		case err := <-done:
+		case err := <-n.ngxErrCh:
			if n.isShuttingDown {
				break
			}
@@ -308,10 +365,30 @@ func (n *NGINXController) Start() {
				process.WaitUntilPortIsAvailable(n.cfg.ListenPorts.HTTP)
				// release command resources
				cmd.Process.Release()
-				cmd = exec.Command(n.binary, "-c", cfgPath)
				// start a new nginx master process if the controller is not being stopped
+				cmd = n.command.ExecCommand()
+				cmd.SysProcAttr = &syscall.SysProcAttr{
+					Setpgid: true,
+					Pgid:    0,
+				}
				n.start(cmd)
			}
+		case event := <-n.updateCh.Out():
+			if n.isShuttingDown {
+				break
+			}
+			if evt, ok := event.(store.Event); ok {
+				klog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj)
+				if evt.Type == store.ConfigurationEvent {
+					// TODO: is this necessary? Consider removing this special case
+					n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change"))
+					continue
+				}
+
+				n.syncQueue.EnqueueSkippableTask(evt.Obj)
+			} else {
+				klog.Warningf("Unexpected event type received %T", event)
+			}
		case <-n.stopCh:
			break
		}
@@ -325,21 +402,28 @@ func (n *NGINXController) Stop() error {
	n.stopLock.Lock()
	defer n.stopLock.Unlock()

-	// Only try draining the workqueue if we haven't already.
	if n.syncQueue.IsShuttingDown() {
		return fmt.Errorf("shutdown already in progress")
	}

-	glog.Infof("shutting down controller queues")
+	klog.Info("Shutting down controller queues")
	close(n.stopCh)
	go n.syncQueue.Shutdown()
	if n.syncStatus != nil {
		n.syncStatus.Shutdown()
	}

-	// Send stop signal to Nginx
-	glog.Info("stopping NGINX process...")
-	cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit")
+	if n.validationWebhookServer != nil {
+		klog.Info("Stopping admission controller")
+		err := n.validationWebhookServer.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	// send stop signal to NGINX
+	klog.Info("Stopping NGINX process")
+	cmd := n.command.ExecCommand("-s", "quit")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
@@ -347,11 +431,11 @@ func (n *NGINXController) Stop() error {
		return err
	}

-	// Wait for the Nginx process disappear
+	// wait for the NGINX process to terminate
	timer := time.NewTicker(time.Second * 1)
	for range timer.C {
		if !process.IsNginxRunning() {
-			glog.Info("NGINX process has stopped")
+			klog.Info("NGINX process has stopped")
			timer.Stop()
			break
		}
@@ -364,7 +448,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) {
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
-		glog.Fatalf("nginx error: %v", err)
+		klog.Fatalf("NGINX error: %v", err)
		n.ngxErrCh <- err
		return
	}
@@ -383,226 +467,132 @@ func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
	}
}

-// testTemplate checks if the NGINX configuration inside the byte array is valid
-// running the command "nginx -t" using a temporal file.
-func (n NGINXController) testTemplate(cfg []byte) error { - if len(cfg) == 0 { - return fmt.Errorf("invalid nginx configuration (empty)") - } - tmpfile, err := ioutil.TempFile("", "nginx-cfg") - if err != nil { - return err - } - defer tmpfile.Close() - err = ioutil.WriteFile(tmpfile.Name(), cfg, 0644) - if err != nil { - return err - } - out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput() - if err != nil { - // this error is different from the rest because it must be clear why nginx is not working - oe := fmt.Sprintf(` -------------------------------------------------------------------------------- -Error: %v -%v -------------------------------------------------------------------------------- -`, err, string(out)) - return errors.New(oe) - } - - os.Remove(tmpfile.Name()) - return nil -} - -// SetConfig sets the configured configmap -func (n *NGINXController) SetConfig(cmap *apiv1.ConfigMap) { - n.configmap = cmap - n.IsProxyProtocolEnabled = false - - m := map[string]string{} - if cmap != nil { - m = cmap.Data - } - - val, ok := m["use-proxy-protocol"] - if ok { - b, err := strconv.ParseBool(val) - if err == nil { - n.IsProxyProtocolEnabled = b - } - } +// generateTemplate returns the nginx configuration file content +func (n NGINXController) generateTemplate(cfg ngx_config.Configuration, ingressCfg ingress.Configuration) ([]byte, error) { - c := ngx_template.ReadConfig(m) - if c.SSLSessionTicketKey != "" { - d, err := base64.StdEncoding.DecodeString(c.SSLSessionTicketKey) - if err != nil { - glog.Warningf("unexpected error decoding key ssl-session-ticket-key: %v", err) - c.SSLSessionTicketKey = "" - } - ioutil.WriteFile("/etc/nginx/tickets.key", d, 0644) - } - - n.backendDefaults = c.Backend -} - -// OnUpdate is called periodically by syncQueue to keep the configuration in sync. -// -// 1. converts configmap configuration to custom configuration object -// 2. write the custom template (the complexity depends on the implementation) -// 3. write the configuration file -// -// returning nill implies the backend will be reloaded. 
-// if an error is returned means requeue the update -func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { - cfg := ngx_template.ReadConfig(n.configmap.Data) - cfg.Resolver = n.resolver - - servers := []*TCPServer{} - for _, pb := range ingressCfg.PassthroughBackends { - svc := pb.Service - if svc == nil { - glog.Warningf("missing service for PassthroughBackends %v", pb.Backend) - continue - } - port, err := strconv.Atoi(pb.Port.String()) - if err != nil { - for _, sp := range svc.Spec.Ports { - if sp.Name == pb.Port.String() { - port = int(sp.Port) - break - } + if n.cfg.EnableSSLPassthrough { + servers := []*TCPServer{} + for _, pb := range ingressCfg.PassthroughBackends { + svc := pb.Service + if svc == nil { + klog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend) + continue } - } else { - for _, sp := range svc.Spec.Ports { - if sp.Port == int32(port) { - port = int(sp.Port) - break + port, err := strconv.Atoi(pb.Port.String()) + if err != nil { + for _, sp := range svc.Spec.Ports { + if sp.Name == pb.Port.String() { + port = int(sp.Port) + break + } + } + } else { + for _, sp := range svc.Spec.Ports { + if sp.Port == int32(port) { + port = int(sp.Port) + break + } } } - } - //TODO: Allow PassthroughBackends to specify they support proxy-protocol - servers = append(servers, &TCPServer{ - Hostname: pb.Hostname, - IP: svc.Spec.ClusterIP, - Port: port, - ProxyProtocol: false, - }) - } + // TODO: Allow PassthroughBackends to specify they support proxy-protocol + servers = append(servers, &TCPServer{ + Hostname: pb.Hostname, + IP: svc.Spec.ClusterIP, + Port: port, + ProxyProtocol: false, + }) + } - if n.isSSLPassthroughEnabled { n.Proxy.ServerList = servers } - // we need to check if the status module configuration changed - if cfg.EnableVtsStatus { - n.setupMonitor(vtsStatusModule) - } else { - n.setupMonitor(defaultStatusModule) - } - - // NGINX cannot resize the hash tables used to store server names. - // For this reason we check if the defined size defined is correct - // for the FQDN defined in the ingress rules adjusting the value - // if is required. + // NGINX cannot resize the hash tables used to store server names. For + // this reason we check if the current size is correct for the host + // names defined in the Ingress rules and adjust the value if + // necessary. 
// https://trac.nginx.org/nginx/ticket/352 // https://trac.nginx.org/nginx/ticket/631 var longestName int var serverNameBytes int - redirectServers := make(map[string]string) + for _, srv := range ingressCfg.Servers { if longestName < len(srv.Hostname) { longestName = len(srv.Hostname) } serverNameBytes += len(srv.Hostname) - if srv.RedirectFromToWWW { - var n string - if strings.HasPrefix(srv.Hostname, "www.") { - n = strings.TrimLeft(srv.Hostname, "www.") - } else { - n = fmt.Sprintf("www.%v", srv.Hostname) - } - glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n) - if _, ok := redirectServers[n]; !ok { - found := false - for _, esrv := range ingressCfg.Servers { - if esrv.Hostname == n { - found = true - break - } - } - if !found { - redirectServers[n] = srv.Hostname - } - } - } } + if cfg.ServerNameHashBucketSize == 0 { nameHashBucketSize := nginxHashBucketSize(longestName) - glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize) + klog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize) cfg.ServerNameHashBucketSize = nameHashBucketSize } + serverNameHashMaxSize := nextPowerOf2(serverNameBytes) if cfg.ServerNameHashMaxSize < serverNameHashMaxSize { - glog.V(3).Infof("adjusting ServerNameHashMaxSize variable to %v", serverNameHashMaxSize) + klog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize) cfg.ServerNameHashMaxSize = serverNameHashMaxSize } - // the limit of open files is per worker process - // and we leave some room to avoid consuming all the FDs available - wp, err := strconv.Atoi(cfg.WorkerProcesses) - glog.V(3).Infof("number of worker processes: %v", wp) - if err != nil { - wp = 1 + if cfg.MaxWorkerOpenFiles == 0 { + // the limit of open files is per worker process + // and we leave some room to avoid consuming all the FDs available + wp, err := strconv.Atoi(cfg.WorkerProcesses) + klog.V(3).Infof("Number of worker processes: %d", wp) + if err != nil { + wp = 1 + } + maxOpenFiles := (rlimitMaxNumFiles() / wp) - 1024 + klog.V(3).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) + if maxOpenFiles < 1024 { + // this means the value of RLIMIT_NOFILE is too low. + maxOpenFiles = 1024 + } + klog.V(3).Infof("Adjusting MaxWorkerOpenFiles variable to %d", maxOpenFiles) + cfg.MaxWorkerOpenFiles = maxOpenFiles } - maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 - glog.V(3).Infof("maximum number of open file descriptors : %v", sysctlFSFileMax()) - if maxOpenFiles < 1024 { - // this means the value of RLIMIT_NOFILE is too low. 
- maxOpenFiles = 1024 + + if cfg.MaxWorkerConnections == 0 { + maxWorkerConnections := int(math.Ceil(float64(cfg.MaxWorkerOpenFiles * 3.0 / 4))) + klog.V(3).Infof("Adjusting MaxWorkerConnections variable to %d", maxWorkerConnections) + cfg.MaxWorkerConnections = maxWorkerConnections } setHeaders := map[string]string{} if cfg.ProxySetHeaders != "" { - cmap, exists, err := n.listers.ConfigMap.GetByKey(cfg.ProxySetHeaders) + cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders) if err != nil { - glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err) - } - - if exists { - setHeaders = cmap.(*apiv1.ConfigMap).Data + klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err) + } else { + setHeaders = cmap.Data } } addHeaders := map[string]string{} if cfg.AddHeaders != "" { - cmap, exists, err := n.listers.ConfigMap.GetByKey(cfg.AddHeaders) + cmap, err := n.store.GetConfigMap(cfg.AddHeaders) if err != nil { - glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err) - } - - if exists { - addHeaders = cmap.(*apiv1.ConfigMap).Data + klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err) + } else { + addHeaders = cmap.Data } } sslDHParam := "" if cfg.SSLDHParam != "" { secretName := cfg.SSLDHParam - s, exists, err := n.listers.Secret.GetByKey(secretName) - if err != nil { - glog.Warningf("unexpected error reading secret %v: %v", secretName, err) - } - if exists { - secret := s.(*apiv1.Secret) + secret, err := n.store.GetSecret(secretName) + if err != nil { + klog.Warningf("Error reading Secret %q from local store: %v", secretName, err) + } else { nsSecName := strings.Replace(secretName, "/", "-", -1) - dh, ok := secret.Data["dhparam.pem"] if ok { - pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh) + pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem) if err != nil { - glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err) + klog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err) } else { sslDHParam = pemFileName } @@ -612,47 +602,96 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { cfg.SSLDHParam = sslDHParam - // disable features are not available in some platforms - switch runtime.GOARCH { - case "arm", "arm64", "ppc64le": - cfg.EnableModsecurity = false - case "s390x": - cfg.EnableModsecurity = false - cfg.EnableBrotli = false + tc := ngx_config.TemplateConfig{ + ProxySetHeaders: setHeaders, + AddHeaders: addHeaders, + BacklogSize: sysctlSomaxconn(), + Backends: ingressCfg.Backends, + PassthroughBackends: ingressCfg.PassthroughBackends, + Servers: ingressCfg.Servers, + TCPBackends: ingressCfg.TCPEndpoints, + UDPBackends: ingressCfg.UDPEndpoints, + Cfg: cfg, + IsIPV6Enabled: n.isIPV6Enabled && !cfg.DisableIpv6, + NginxStatusIpv4Whitelist: cfg.NginxStatusIpv4Whitelist, + NginxStatusIpv6Whitelist: cfg.NginxStatusIpv6Whitelist, + RedirectServers: buildRedirects(ingressCfg.Servers), + IsSSLPassthroughEnabled: n.cfg.EnableSSLPassthrough, + ListenPorts: n.cfg.ListenPorts, + PublishService: n.GetPublishService(), + DynamicCertificatesEnabled: n.cfg.DynamicCertificatesEnabled, + EnableMetrics: n.cfg.EnableMetrics, + + HealthzURI: nginx.HealthPath, + PID: nginx.PID, + StatusSocket: nginx.StatusSocket, + StatusPath: nginx.StatusPath, + StreamSocket: nginx.StreamSocket, } - tc := ngx_config.TemplateConfig{ - ProxySetHeaders: setHeaders, - AddHeaders: addHeaders, - MaxOpenFiles: 
maxOpenFiles,
-		BacklogSize:             sysctlSomaxconn(),
-		Backends:                ingressCfg.Backends,
-		PassthroughBackends:     ingressCfg.PassthroughBackends,
-		Servers:                 ingressCfg.Servers,
-		TCPBackends:             ingressCfg.TCPEndpoints,
-		UDPBackends:             ingressCfg.UDPEndpoints,
-		HealthzURI:              ngxHealthPath,
-		CustomErrors:            len(cfg.CustomHTTPErrors) > 0,
-		Cfg:                     cfg,
-		IsIPV6Enabled:           n.isIPV6Enabled && !cfg.DisableIpv6,
-		RedirectServers:         redirectServers,
-		IsSSLPassthroughEnabled: n.isSSLPassthroughEnabled,
-		ListenPorts:             n.cfg.ListenPorts,
-		PublishService:          n.GetPublishService(),
-	}
-
-	content, err := n.t.Write(tc)
+	tc.Cfg.Checksum = ingressCfg.ConfigurationChecksum
+
+	return n.t.Write(tc)
+}
+
+// testTemplate checks that the NGINX configuration inside the byte array is valid
+// by running the command "nginx -t" on a temporary file.
+func (n NGINXController) testTemplate(cfg []byte) error {
+	if len(cfg) == 0 {
+		return fmt.Errorf("invalid NGINX configuration (empty)")
+	}
+	tmpfile, err := ioutil.TempFile("", tempNginxPattern)
+	if err != nil {
+		return err
+	}
+	defer tmpfile.Close()
+	err = ioutil.WriteFile(tmpfile.Name(), cfg, file.ReadWriteByUser)
	if err != nil {
		return err
	}
+	out, err := n.command.Test(tmpfile.Name())
+	if err != nil {
+		// this error is different from the rest because it must be clear why nginx is not working
+		oe := fmt.Sprintf(`
+-------------------------------------------------------------------------------
+Error: %v
+%v
+-------------------------------------------------------------------------------
+`, err, string(out))
+
+		return errors.New(oe)
+	}
+
+	os.Remove(tmpfile.Name())
+	return nil
+}
+
+// OnUpdate is called by the synchronization loop whenever configuration
+// changes are detected. The received backend Configuration is merged with the
+// configuration ConfigMap before generating the final configuration file.
+// Returns nil in case the backend was successfully reloaded.
+func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
+	cfg := n.store.GetBackendConfiguration()
+	cfg.Resolver = n.resolver
+
+	content, err := n.generateTemplate(cfg, ingressCfg)
+	if err != nil {
+		return err
+	}
+
+	if cfg.EnableOpentracing {
+		err := createOpentracingCfg(cfg)
+		if err != nil {
+			return err
+		}
+	}

	err = n.testTemplate(content)
	if err != nil {
		return err
	}

-	if glog.V(2) {
+	if klog.V(2) {
		src, _ := ioutil.ReadFile(cfgPath)
		if !bytes.Equal(src, content) {
			tmpfile, err := ioutil.TempFile("", "new-nginx-cfg")
@@ -660,31 +699,30 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
				return err
			}
			defer tmpfile.Close()
-			err = ioutil.WriteFile(tmpfile.Name(), content, 0644)
+			err = ioutil.WriteFile(tmpfile.Name(), content, file.ReadWriteByUser)
			if err != nil {
				return err
			}

-			// executing diff can return exit code != 0
-			diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
+			diffOutput, err := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
+			if err != nil {
+				klog.Warningf("Failed to execute diff command: %v", err)
+			}

-			glog.Infof("NGINX configuration diff\n")
-			glog.Infof("%v\n", string(diffOutput))
+			klog.Infof("NGINX configuration diff:\n%v", string(diffOutput))

-			// Do not use defer to remove the temporal file.
-			// This is helpful when there is an error in the
-			// temporal configuration (we can manually inspect the file).
-			// Only remove the file when no error occurred.
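The new testTemplate above validates a candidate configuration by writing it to a temporary file and shelling out to NGINX. A self-contained sketch of the same check, assuming an `nginx` binary on PATH (the helper name and file mode are illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

// validateNginxConf writes cfg to a temporary file and runs "nginx -t" on it,
// returning the combined output when validation fails.
func validateNginxConf(cfg []byte) error {
	tmp, err := ioutil.TempFile("", "nginx-cfg")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	if err := ioutil.WriteFile(tmp.Name(), cfg, 0640); err != nil {
		return err
	}

	// nginx -t parses and checks the file without starting a server
	out, err := exec.Command("nginx", "-t", "-c", tmp.Name()).CombinedOutput()
	if err != nil {
		return fmt.Errorf("invalid configuration: %v\n%s", err, out)
	}
	return nil
}

func main() {
	err := validateNginxConf([]byte("events {}\nhttp {}\n"))
	fmt.Println("validation result:", err)
}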
+ // we do not defer the deletion of temp files in order + // to keep them around for inspection in case of error os.Remove(tmpfile.Name()) } } - err = ioutil.WriteFile(cfgPath, content, 0644) + err = ioutil.WriteFile(cfgPath, content, file.ReadWriteByUser) if err != nil { return err } - o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput() + o, err := n.command.ExecCommand("-s", "reload").CombinedOutput() if err != nil { return fmt.Errorf("%v\n%v", err, string(o)) } @@ -692,9 +730,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { return nil } -// nginxHashBucketSize computes the correct nginx hash_bucket_size for a hash with the given longest key +// nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash +// with the given longest key. func nginxHashBucketSize(longestString int) int { - // See https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation + // see https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation wordSize := 8 // Assume 64 bit CPU n := longestString + 2 aligned := (n + wordSize - 1) & ^(wordSize - 1) @@ -715,3 +754,446 @@ func nextPowerOf2(v int) int { return v } + +func (n *NGINXController) setupSSLProxy() { + cfg := n.store.GetBackendConfiguration() + sslPort := n.cfg.ListenPorts.HTTPS + proxyPort := n.cfg.ListenPorts.SSLProxy + + klog.Info("Starting TLS proxy for SSL Passthrough") + n.Proxy = &TCPProxy{ + Default: &TCPServer{ + Hostname: "localhost", + IP: "127.0.0.1", + Port: proxyPort, + ProxyProtocol: true, + }, + } + + listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort)) + if err != nil { + klog.Fatalf("%v", err) + } + + proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout} + + // accept TCP connections on the configured HTTPS port + go func() { + for { + var conn net.Conn + var err error + + if n.store.GetBackendConfiguration().UseProxyProtocol { + // wrap the listener in order to decode Proxy + // Protocol before handling the connection + conn, err = proxyList.Accept() + } else { + conn, err = listener.Accept() + } + + if err != nil { + klog.Warningf("Error accepting TCP connection: %v", err) + continue + } + + klog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) + go n.Proxy.Handle(conn) + } + }() +} + +// Helper function to clear Certificates from the ingress configuration since they should be ignored when +// checking if the new configuration changes can be applied dynamically if dynamic certificates is on +func clearCertificates(config *ingress.Configuration) { + var clearedServers []*ingress.Server + for _, server := range config.Servers { + copyOfServer := *server + copyOfServer.SSLCert = ingress.SSLCert{PemFileName: copyOfServer.SSLCert.PemFileName} + clearedServers = append(clearedServers, ©OfServer) + } + config.Servers = clearedServers +} + +// Helper function to clear endpoints from the ingress configuration since they should be ignored when +// checking if the new configuration changes can be applied dynamically. 
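nginxHashBucketSize above rounds the longest server name up to the CPU word size before the final power-of-two sizing. A worked sketch of the alignment step (a standalone reimplementation for illustration, not the controller's code):

package main

import "fmt"

// wordAlign rounds n up to the next multiple of the word size, mirroring the
// bucket-size arithmetic above (8 bytes assumed for a 64-bit CPU).
func wordAlign(n, wordSize int) int {
	return (n + wordSize - 1) & ^(wordSize - 1)
}

func main() {
	// e.g. a longest server name of 21 characters: 21+2 = 23, aligned up to 24
	fmt.Println(wordAlign(21+2, 8)) // prints 24
}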
+func clearL4serviceEndpoints(config *ingress.Configuration) { + var clearedTCPL4Services []ingress.L4Service + var clearedUDPL4Services []ingress.L4Service + for _, service := range config.TCPEndpoints { + copyofService := ingress.L4Service{ + Port: service.Port, + Backend: service.Backend, + Endpoints: []ingress.Endpoint{}, + Service: nil, + } + clearedTCPL4Services = append(clearedTCPL4Services, copyofService) + } + for _, service := range config.UDPEndpoints { + copyofService := ingress.L4Service{ + Port: service.Port, + Backend: service.Backend, + Endpoints: []ingress.Endpoint{}, + Service: nil, + } + clearedUDPL4Services = append(clearedUDPL4Services, copyofService) + } + config.TCPEndpoints = clearedTCPL4Services + config.UDPEndpoints = clearedUDPL4Services +} + +// IsDynamicConfigurationEnough returns whether a Configuration can be +// dynamically applied, without reloading the backend. +func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool { + copyOfRunningConfig := *n.runningConfig + copyOfPcfg := *pcfg + + copyOfRunningConfig.Backends = []*ingress.Backend{} + copyOfPcfg.Backends = []*ingress.Backend{} + + clearL4serviceEndpoints(©OfRunningConfig) + clearL4serviceEndpoints(©OfPcfg) + + copyOfRunningConfig.ControllerPodsCount = 0 + copyOfPcfg.ControllerPodsCount = 0 + + if n.cfg.DynamicCertificatesEnabled { + clearCertificates(©OfRunningConfig) + clearCertificates(©OfPcfg) + } + + return copyOfRunningConfig.Equal(©OfPcfg) +} + +// configureDynamically encodes new Backends in JSON format and POSTs the +// payload to an internal HTTP endpoint handled by Lua. +func configureDynamically(pcfg *ingress.Configuration, isDynamicCertificatesEnabled bool) error { + backends := make([]*ingress.Backend, len(pcfg.Backends)) + + for i, backend := range pcfg.Backends { + var service *apiv1.Service + if backend.Service != nil { + service = &apiv1.Service{Spec: backend.Service.Spec} + } + luaBackend := &ingress.Backend{ + Name: backend.Name, + Port: backend.Port, + SSLPassthrough: backend.SSLPassthrough, + SessionAffinity: backend.SessionAffinity, + UpstreamHashBy: backend.UpstreamHashBy, + LoadBalancing: backend.LoadBalancing, + Service: service, + NoServer: backend.NoServer, + TrafficShapingPolicy: backend.TrafficShapingPolicy, + AlternativeBackends: backend.AlternativeBackends, + } + + var endpoints []ingress.Endpoint + for _, endpoint := range backend.Endpoints { + endpoints = append(endpoints, ingress.Endpoint{ + Address: endpoint.Address, + Port: endpoint.Port, + }) + } + + luaBackend.Endpoints = endpoints + backends[i] = luaBackend + } + + statusCode, _, err := nginx.NewPostStatusRequest("/configuration/backends", "application/json", backends) + if err != nil { + return err + } + + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) + } + + streams := make([]ingress.Backend, 0) + for _, ep := range pcfg.TCPEndpoints { + var service *apiv1.Service + if ep.Service != nil { + service = &apiv1.Service{Spec: ep.Service.Spec} + } + + key := fmt.Sprintf("tcp-%v-%v-%v", ep.Backend.Namespace, ep.Backend.Name, ep.Backend.Port.String()) + streams = append(streams, ingress.Backend{ + Name: key, + Endpoints: ep.Endpoints, + Port: intstr.FromInt(ep.Port), + Service: service, + }) + } + for _, ep := range pcfg.UDPEndpoints { + var service *apiv1.Service + if ep.Service != nil { + service = &apiv1.Service{Spec: ep.Service.Spec} + } + + key := fmt.Sprintf("udp-%v-%v-%v", ep.Backend.Namespace, ep.Backend.Name, 
ep.Backend.Port.String()) + streams = append(streams, ingress.Backend{ + Name: key, + Endpoints: ep.Endpoints, + Port: intstr.FromInt(ep.Port), + Service: service, + }) + } + + err = updateStreamConfiguration(streams) + if err != nil { + return err + } + + statusCode, _, err = nginx.NewPostStatusRequest("/configuration/general", "application/json", ingress.GeneralConfig{ + ControllerPodsCount: pcfg.ControllerPodsCount, + }) + if err != nil { + return err + } + + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) + } + + if isDynamicCertificatesEnabled { + err = configureCertificates(pcfg) + if err != nil { + return err + } + } + + return nil +} + +func updateStreamConfiguration(streams []ingress.Backend) error { + conn, err := net.Dial("unix", nginx.StreamSocket) + if err != nil { + return err + } + defer conn.Close() + + buf, err := json.Marshal(streams) + if err != nil { + return err + } + + _, err = conn.Write(buf) + if err != nil { + return err + } + _, err = fmt.Fprintf(conn, "\r\n") + if err != nil { + return err + } + + return nil +} + +// configureCertificates JSON encodes certificates and POSTs it to an internal HTTP endpoint +// that is handled by Lua +func configureCertificates(pcfg *ingress.Configuration) error { + var servers []*ingress.Server + + for _, server := range pcfg.Servers { + servers = append(servers, &ingress.Server{ + Hostname: server.Hostname, + SSLCert: ingress.SSLCert{ + PemCertKey: server.SSLCert.PemCertKey, + }, + }) + + if server.Alias != "" && server.SSLCert.PemCertKey != "" && + ssl.IsValidHostname(server.Alias, server.SSLCert.CN) { + servers = append(servers, &ingress.Server{ + Hostname: server.Alias, + SSLCert: ingress.SSLCert{ + PemCertKey: server.SSLCert.PemCertKey, + }, + }) + } + } + + redirects := buildRedirects(pcfg.Servers) + for _, redirect := range redirects { + servers = append(servers, &ingress.Server{ + Hostname: redirect.From, + SSLCert: ingress.SSLCert{ + PemCertKey: redirect.SSLCert.PemCertKey, + }, + }) + } + + statusCode, _, err := nginx.NewPostStatusRequest("/configuration/servers", "application/json", servers) + if err != nil { + return err + } + + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) + } + + return nil +} + +const zipkinTmpl = `{ + "service_name": "{{ .ZipkinServiceName }}", + "collector_host": "{{ .ZipkinCollectorHost }}", + "collector_port": {{ .ZipkinCollectorPort }}, + "sample_rate": {{ .ZipkinSampleRate }} +}` + +const jaegerTmpl = `{ + "service_name": "{{ .JaegerServiceName }}", + "sampler": { + "type": "{{ .JaegerSamplerType }}", + "param": {{ .JaegerSamplerParam }}, + "samplingServerURL": "{{ .JaegerSamplerHost }}:{{ .JaegerSamplerPort }}/sampling" + }, + "reporter": { + "localAgentHostPort": "{{ .JaegerCollectorHost }}:{{ .JaegerCollectorPort }}" + } +}` + +const datadogTmpl = `{ + "service": "{{ .DatadogServiceName }}", + "agent_host": "{{ .DatadogCollectorHost }}", + "agent_port": {{ .DatadogCollectorPort }}, + "operation_name_override": "{{ .DatadogOperationNameOverride }}" +}` + +func createOpentracingCfg(cfg ngx_config.Configuration) error { + var tmpl *template.Template + var err error + + if cfg.ZipkinCollectorHost != "" { + tmpl, err = template.New("zipkin").Parse(zipkinTmpl) + if err != nil { + return err + } + } else if cfg.JaegerCollectorHost != "" { + tmpl, err = template.New("jaeger").Parse(jaegerTmpl) + if err != nil { + return err + } + } else if cfg.DatadogCollectorHost != "" { + tmpl, err = 
template.New("datadog").Parse(datadogTmpl)
+		if err != nil {
+			return err
+		}
+	} else {
+		tmpl, _ = template.New("empty").Parse("{}")
+	}
+
+	tmplBuf := bytes.NewBuffer(make([]byte, 0))
+	err = tmpl.Execute(tmplBuf, cfg)
+	if err != nil {
+		return err
+	}
+
+	// Expand possible environment variables before writing the configuration to file.
+	expanded := os.ExpandEnv(string(tmplBuf.Bytes()))
+
+	return ioutil.WriteFile("/etc/nginx/opentracing.json", []byte(expanded), file.ReadWriteByUser)
+}
+
+func cleanTempNginxCfg() error {
+	var files []string
+
+	err := filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error {
+		if info.IsDir() && os.TempDir() != path {
+			return filepath.SkipDir
+		}
+
+		dur, _ := time.ParseDuration("-5m")
+		fiveMinutesAgo := time.Now().Add(dur)
+		if strings.HasPrefix(info.Name(), tempNginxPattern) && info.ModTime().Before(fiveMinutesAgo) {
+			files = append(files, path)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		err := os.Remove(file)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type redirect struct {
+	From    string
+	To      string
+	SSLCert ingress.SSLCert
+}
+
+func buildRedirects(servers []*ingress.Server) []*redirect {
+	names := sets.String{}
+	redirectServers := make([]*redirect, 0)
+
+	for _, srv := range servers {
+		if !srv.RedirectFromToWWW {
+			continue
+		}
+
+		to := srv.Hostname
+
+		var from string
+		if strings.HasPrefix(to, "www.") {
+			from = strings.TrimPrefix(to, "www.")
+		} else {
+			from = fmt.Sprintf("www.%v", to)
+		}
+
+		if names.Has(to) {
+			continue
+		}
+
+		klog.V(3).Infof("Creating redirect from %q to %q", from, to)
+		found := false
+		for _, esrv := range servers {
+			if esrv.Hostname == from {
+				found = true
+				break
+			}
+		}
+
+		if found {
+			klog.Warningf("An Ingress with hostname %q already exists. Skipping creation of the redirect from %q to %q.", from, from, to)
+			continue
+		}
+
+		r := &redirect{
+			From: from,
+			To:   to,
+		}
+
+		if srv.SSLCert.PemSHA != "" {
+			if ssl.IsValidHostname(from, srv.SSLCert.CN) {
+				r.SSLCert = srv.SSLCert
+			} else {
+				klog.Warningf("The server %v has SSL configured but the SSL certificate does not contain a CN for %v. Redirects will not work for HTTPS to HTTPS", from, to)
+			}
+		}
+
+		redirectServers = append(redirectServers, r)
+		names.Insert(to)
+	}
+
+	return redirectServers
+}
+
+func (n *NGINXController) setLeader(leader bool) {
+	var i uint32
+	if leader {
+		i = 1
+	}
+	atomic.StoreUint32(&n.currentLeader, i)
+}
+
+func (n *NGINXController) isLeader() bool {
+	return atomic.LoadUint32(&n.currentLeader) != 0
+}
diff --git a/internal/ingress/controller/nginx_test.go b/internal/ingress/controller/nginx_test.go
index 4b85ed2fc8..1de35f97db 100644
--- a/internal/ingress/controller/nginx_test.go
+++ b/internal/ingress/controller/nginx_test.go
@@ -16,7 +16,309 @@ limitations under the License.
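createOpentracingCfg above renders one of the collector templates and expands environment variables before writing /etc/nginx/opentracing.json. A trimmed-down sketch of that flow using only the standard library (the field and template names here are illustrative, not the controller's):

package main

import (
	"bytes"
	"fmt"
	"os"
	"text/template"
)

const zipkinSketch = `{"service_name": "{{ .ServiceName }}", "collector_host": "{{ .CollectorHost }}"}`

func main() {
	tmpl, err := template.New("zipkin").Parse(zipkinSketch)
	if err != nil {
		panic(err)
	}

	// Render the template, then expand env vars such as $HOSTNAME in the result.
	var buf bytes.Buffer
	data := struct{ ServiceName, CollectorHost string }{"nginx", "$HOSTNAME"}
	if err := tmpl.Execute(&buf, data); err != nil {
		panic(err)
	}

	fmt.Println(os.ExpandEnv(buf.String()))
}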
package controller

-import "testing"
+import (
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	jsoniter "github.com/json-iterator/go"
+	apiv1 "k8s.io/api/core/v1"
+
+	"k8s.io/ingress-nginx/internal/ingress"
+	"k8s.io/ingress-nginx/internal/nginx"
+)
+
+func TestIsDynamicConfigurationEnough(t *testing.T) {
+	backends := []*ingress.Backend{{
+		Name: "fakenamespace-myapp-80",
+		Endpoints: []ingress.Endpoint{
+			{
+				Address: "10.0.0.1",
+				Port:    "8080",
+			},
+			{
+				Address: "10.0.0.2",
+				Port:    "8080",
+			},
+		},
+	}}
+
+	servers := []*ingress.Server{{
+		Hostname: "myapp.fake",
+		Locations: []*ingress.Location{
+			{
+				Path:    "/",
+				Backend: "fakenamespace-myapp-80",
+			},
+		},
+		SSLCert: ingress.SSLCert{
+			PemCertKey: "fake-certificate",
+		},
+	}}
+
+	commonConfig := &ingress.Configuration{
+		Backends: backends,
+		Servers:  servers,
+	}
+
+	n := &NGINXController{
+		runningConfig: &ingress.Configuration{
+			Backends: backends,
+			Servers:  servers,
+		},
+		cfg: &Configuration{
+			DynamicCertificatesEnabled: false,
+		},
+	}
+
+	newConfig := commonConfig
+	if !n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("When new config is same as the running config it should be deemed as dynamically configurable")
+	}
+
+	newConfig = &ingress.Configuration{
+		Backends: []*ingress.Backend{{Name: "another-backend-8081"}},
+		Servers:  []*ingress.Server{{Hostname: "myapp1.fake"}},
+	}
+	if n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("Expected to not be dynamically configurable when there's more than just backends change")
+	}
+
+	newConfig = &ingress.Configuration{
+		Backends: []*ingress.Backend{{Name: "a-backend-8080"}},
+		Servers:  servers,
+	}
+	if !n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("Expected to be dynamically configurable when only backends change")
+	}
+
+	n.cfg.DynamicCertificatesEnabled = true
+
+	newServers := []*ingress.Server{{
+		Hostname: "myapp1.fake",
+		Locations: []*ingress.Location{
+			{
+				Path:    "/",
+				Backend: "fakenamespace-myapp-80",
+			},
+		},
+		SSLCert: ingress.SSLCert{
+			PemCertKey: "fake-certificate",
+		},
+	}}
+
+	newConfig = &ingress.Configuration{
+		Backends: backends,
+		Servers:  newServers,
+	}
+	if n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("Expected to not be dynamically configurable when dynamic certificates is enabled and a non-certificate field in servers is updated")
+	}
+
+	newServers[0].Hostname = "myapp.fake"
+	newServers[0].SSLCert.PemCertKey = "new-fake-certificate"
+
+	newConfig = &ingress.Configuration{
+		Backends: backends,
+		Servers:  newServers,
+	}
+	if !n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("Expected to be dynamically configurable when only SSLCert changes")
+	}
+
+	newConfig = &ingress.Configuration{
+		Backends: []*ingress.Backend{{Name: "a-backend-8080"}},
+		Servers:  newServers,
+	}
+	if !n.IsDynamicConfigurationEnough(newConfig) {
+		t.Errorf("Expected to be dynamically configurable when backend and SSLCert changes")
+	}
+
+	if !n.runningConfig.Equal(commonConfig) {
+		t.Errorf("Expected running config to not change")
+	}
+
+	if !newConfig.Equal(&ingress.Configuration{Backends: []*ingress.Backend{{Name: "a-backend-8080"}}, Servers: newServers}) {
+		t.Errorf("Expected new config to not change")
+	}
+}
+
+func TestConfigureDynamically(t *testing.T) {
+	listener, err := net.Listen("unix", nginx.StatusSocket)
+	if err != nil {
+		t.Errorf("creating unix listener: %s", err)
+	}
+	defer listener.Close()
+	defer os.Remove(nginx.StatusSocket)
+
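The test above serves the controller's status API on a unix socket, which is also how configureDynamically reaches it. A minimal sketch of the client side of that pattern, POSTing JSON over a unix socket with net/http (the socket path and helper name are hypothetical, not the internal nginx package's actual API):

package main

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"net/http"
)

// postStatus POSTs a JSON body to a path on a unix-socket HTTP server. The
// host in the URL is ignored; the custom DialContext connects to the socket.
func postStatus(socket, path string, body []byte) (int, error) {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socket)
			},
		},
	}

	resp, err := client.Post("http://localhost"+path, "application/json", bytes.NewReader(body))
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	code, err := postStatus("/tmp/nginx-status.sock", "/configuration/backends", []byte(`[]`))
	fmt.Println(code, err)
}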
+	streamListener, err := net.Listen("unix", nginx.StreamSocket)
+	if err != nil {
+		t.Errorf("creating unix listener: %s", err)
+	}
+	defer streamListener.Close()
+	defer os.Remove(nginx.StreamSocket)
+
+	server := &httptest.Server{
+		Listener: listener,
+		Config: &http.Server{
+			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusCreated)
+
+				if r.Method != "POST" {
+					t.Errorf("expected a 'POST' request, got '%s'", r.Method)
+				}
+
+				b, err := ioutil.ReadAll(r.Body)
+				if err != nil && err != io.EOF {
+					t.Fatal(err)
+				}
+				body := string(b)
+
+				switch r.URL.Path {
+				case "/configuration/backends":
+					{
+						if strings.Contains(body, "target") {
+							t.Errorf("unexpected target reference in JSON content: %v", body)
+						}
+
+						if !strings.Contains(body, "service") {
+							t.Errorf("service reference should be present in JSON content: %v", body)
+						}
+					}
+				case "/configuration/general":
+					{
+						if !strings.Contains(body, "controllerPodsCount") {
+							t.Errorf("controllerPodsCount should be present in JSON content: %v", body)
+						}
+					}
+				default:
+					t.Errorf("unknown request to %s", r.URL.Path)
+				}
+			}),
+		},
+	}
+	defer server.Close()
+	server.Start()
+
+	target := &apiv1.ObjectReference{}
+
+	backends := []*ingress.Backend{{
+		Name:    "fakenamespace-myapp-80",
+		Service: &apiv1.Service{},
+		Endpoints: []ingress.Endpoint{
+			{
+				Address: "10.0.0.1",
+				Port:    "8080",
+				Target:  target,
+			},
+			{
+				Address: "10.0.0.2",
+				Port:    "8080",
+				Target:  target,
+			},
+		},
+	}}
+
+	servers := []*ingress.Server{{
+		Hostname: "myapp.fake",
+		Locations: []*ingress.Location{
+			{
+				Path:    "/",
+				Backend: "fakenamespace-myapp-80",
+				Service: &apiv1.Service{},
+			},
+		},
+	}}
+
+	commonConfig := &ingress.Configuration{
+		Backends:            backends,
+		Servers:             servers,
+		ControllerPodsCount: 2,
+	}
+
+	err = configureDynamically(commonConfig, false)
+	if err != nil {
+		t.Errorf("unexpected error posting dynamic configuration: %v", err)
+	}
+
+	if commonConfig.Backends[0].Endpoints[0].Target != target {
+		t.Errorf("unexpected change in the configuration object after configureDynamically invocation")
+	}
+}
+
+func TestConfigureCertificates(t *testing.T) {
+	listener, err := net.Listen("unix", nginx.StatusSocket)
+	if err != nil {
+		t.Errorf("creating unix listener: %s", err)
+	}
+	defer listener.Close()
+	defer os.Remove(nginx.StatusSocket)
+
+	streamListener, err := net.Listen("unix", nginx.StreamSocket)
+	if err != nil {
+		t.Errorf("creating unix listener: %s", err)
+	}
+	defer streamListener.Close()
+	defer os.Remove(nginx.StreamSocket)
+
+	servers := []*ingress.Server{{
+		Hostname: "myapp.fake",
+		SSLCert: ingress.SSLCert{
+			PemCertKey: "fake-cert",
+		},
+	}}
+
+	server := &httptest.Server{
+		Listener: listener,
+		Config: &http.Server{
+			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusCreated)
+
+				if r.Method != "POST" {
+					t.Errorf("expected a 'POST' request, got '%s'", r.Method)
+				}
+
+				b, err := ioutil.ReadAll(r.Body)
+				if err != nil && err != io.EOF {
+					t.Fatal(err)
+				}
+				var postedServers []ingress.Server
+				err = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &postedServers)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				if len(servers) != len(postedServers) {
+					t.Errorf("Expected servers to be the same length as the posted servers")
+				}
+
+				for i, server := range servers {
+					if !server.Equal(&postedServers[i]) {
+						t.Errorf("Expected servers and posted servers to be equal")
+					}
+				}
+			}),
+		},
+	}
+	defer server.Close()
+	server.Start()
+
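The assertion above decodes the posted servers with jsoniter's ConfigCompatibleWithStandardLibrary, a faster drop-in replacement for encoding/json with the same semantics. A tiny sketch of that usage (values are illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary

	// Marshal and Unmarshal behave like the standard library equivalents.
	b, _ := json.Marshal(map[string]int{"backends": 2})
	fmt.Println(string(b))

	var out map[string]int
	_ = json.Unmarshal(b, &out)
	fmt.Println(out["backends"])
}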
+	commonConfig := &ingress.Configuration{
+		Servers: servers,
+	}
+
+	err = configureCertificates(commonConfig)
+	if err != nil {
+		t.Errorf("unexpected error posting dynamic certificate configuration: %v", err)
+	}
+}

func TestNginxHashBucketSize(t *testing.T) {
	tests := []struct {
@@ -113,3 +415,58 @@ func TestNextPowerOf2(t *testing.T) {
		t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 0, actual)
	}
}
+
+func TestCleanTempNginxCfg(t *testing.T) {
+	err := cleanTempNginxCfg()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tmpfile, err := ioutil.TempFile("", tempNginxPattern)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer tmpfile.Close()
+
+	dur, err := time.ParseDuration("-10m")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	oldTime := time.Now().Add(dur)
+	err = os.Chtimes(tmpfile.Name(), oldTime, oldTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tmpfile, err = ioutil.TempFile("", tempNginxPattern)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer tmpfile.Close()
+
+	err = cleanTempNginxCfg()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var files []string
+
+	err = filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error {
+		if info.IsDir() && os.TempDir() != path {
+			return filepath.SkipDir
+		}
+
+		if strings.HasPrefix(info.Name(), tempNginxPattern) {
+			files = append(files, path)
+		}
+		return nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(files) != 1 {
+		t.Errorf("expected one file but %d were found", len(files))
+	}
+}
diff --git a/internal/ingress/controller/process/nginx.go b/internal/ingress/controller/process/nginx.go
index 4a925a3e11..2f31dbc806 100644
--- a/internal/ingress/controller/process/nginx.go
+++ b/internal/ingress/controller/process/nginx.go
@@ -24,9 +24,9 @@ import (
	"syscall"
	"time"

-	"github.com/golang/glog"
	ps "github.com/mitchellh/go-ps"
	"github.com/ncabatoff/process-exporter/proc"
+	"k8s.io/klog"
)

// IsRespawnIfRequired checks if error type is exec.ExitError or not
@@ -37,7 +37,7 @@ func IsRespawnIfRequired(err error) bool {
	}

	waitStatus := exitError.Sys().(syscall.WaitStatus)
-	glog.Warningf(`
+	klog.Warningf(`
-------------------------------------------------------------------------------
NGINX master process died (%v): %v
-------------------------------------------------------------------------------
@@ -45,6 +45,8 @@ NGINX master process died (%v): %v
	return true
}

+// WaitUntilPortIsAvailable waits until there are no NGINX master or worker
+// processes listening on a particular port.
func WaitUntilPortIsAvailable(port int) { // we wait until the workers are killed for { @@ -54,9 +56,9 @@ func WaitUntilPortIsAvailable(port int) { } conn.Close() // kill nginx worker processes - fs, err := proc.NewFS("/proc") + fs, err := proc.NewFS("/proc", false) if err != nil { - glog.Errorf("unexpected error reading /proc information: %v", err) + klog.Errorf("unexpected error reading /proc information: %v", err) continue } @@ -64,14 +66,14 @@ func WaitUntilPortIsAvailable(port int) { for _, p := range procs { pn, err := p.Comm() if err != nil { - glog.Errorf("unexpected error obtaining process information: %v", err) + klog.Errorf("unexpected error obtaining process information: %v", err) continue } if pn == "nginx" { osp, err := os.FindProcess(p.PID) if err != nil { - glog.Errorf("unexpected error obtaining process information: %v", err) + klog.Errorf("unexpected error obtaining process information: %v", err) continue } osp.Signal(syscall.SIGQUIT) diff --git a/internal/ingress/controller/stat_collector.go b/internal/ingress/controller/stat_collector.go deleted file mode 100644 index ad3434d156..0000000000 --- a/internal/ingress/controller/stat_collector.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - - "k8s.io/ingress-nginx/internal/ingress/controller/metric/collector" -) - -const ( - ngxStatusPath = "/nginx_status" - ngxVtsPath = "/nginx_status/format/json" -) - -func (n *NGINXController) setupMonitor(sm statusModule) { - csm := n.statusModule - if csm != sm { - glog.Infof("changing prometheus collector from %v to %v", csm, sm) - n.stats.stop(csm) - n.stats.start(sm) - n.statusModule = sm - } -} - -type statsCollector struct { - process prometheus.Collector - basic collector.Stopable - vts collector.Stopable - - namespace string - watchClass string - - port int -} - -func (s *statsCollector) stop(sm statusModule) { - switch sm { - case defaultStatusModule: - s.basic.Stop() - prometheus.Unregister(s.basic) - case vtsStatusModule: - s.vts.Stop() - prometheus.Unregister(s.vts) - } -} - -func (s *statsCollector) start(sm statusModule) { - switch sm { - case defaultStatusModule: - s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, s.port, ngxStatusPath) - prometheus.Register(s.basic) - break - case vtsStatusModule: - s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, s.port, ngxVtsPath) - prometheus.Register(s.vts) - break - } -} - -func newStatsCollector(ns, class, binary string, port int) *statsCollector { - glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class) - glog.Infof("collector extracting information from port %v", port) - pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{ - Name: "nginx", - Binary: binary, - }) - if err != nil { - glog.Fatalf("unexpected error registering nginx collector: %v", err) - } - err = prometheus.Register(pc) - if err != nil { - glog.Fatalf("unexpected error registering nginx collector: %v", err) - } - - return &statsCollector{ - namespace: ns, - watchClass: class, - process: pc, - port: port, - } -} diff --git a/internal/ingress/controller/status.go b/internal/ingress/controller/status.go new file mode 100644 index 0000000000..f6f562b919 --- /dev/null +++ b/internal/ingress/controller/status.go @@ -0,0 +1,123 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "os" + "time" + + "k8s.io/klog" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" +) + +type leaderElectionConfig struct { + PodName string + PodNamespace string + + Client clientset.Interface + + ElectionID string + + OnStartedLeading func(chan struct{}) + OnStoppedLeading func() +} + +func setupLeaderElection(config *leaderElectionConfig) { + var elector *leaderelection.LeaderElector + + // start a new context + ctx := context.Background() + + var cancelContext context.CancelFunc + + var newLeaderCtx = func(ctx context.Context) context.CancelFunc { + // allow to cancel the context in case we stop being the leader + leaderCtx, cancel := context.WithCancel(ctx) + go elector.Run(leaderCtx) + return cancel + } + + var stopCh chan struct{} + callbacks := leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + klog.V(2).Infof("I am the new leader") + stopCh = make(chan struct{}) + + if config.OnStartedLeading != nil { + config.OnStartedLeading(stopCh) + } + }, + OnStoppedLeading: func() { + klog.V(2).Info("I am not leader anymore") + close(stopCh) + + // cancel the context + cancelContext() + + cancelContext = newLeaderCtx(ctx) + + if config.OnStoppedLeading != nil { + config.OnStoppedLeading() + } + }, + OnNewLeader: func(identity string) { + klog.Infof("new leader elected: %v", identity) + }, + } + + broadcaster := record.NewBroadcaster() + hostname, _ := os.Hostname() + + recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{ + Component: "ingress-leader-elector", + Host: hostname, + }) + + lock := resourcelock.ConfigMapLock{ + ConfigMapMeta: metav1.ObjectMeta{Namespace: config.PodNamespace, Name: config.ElectionID}, + Client: config.Client.CoreV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: config.PodName, + EventRecorder: recorder, + }, + } + + ttl := 30 * time.Second + var err error + + elector, err = leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: &lock, + LeaseDuration: ttl, + RenewDeadline: ttl / 2, + RetryPeriod: ttl / 4, + + Callbacks: callbacks, + }) + if err != nil { + klog.Fatalf("unexpected error starting leader election: %v", err) + } + + cancelContext = newLeaderCtx(ctx) +} diff --git a/internal/ingress/controller/store/backend_ssl.go b/internal/ingress/controller/store/backend_ssl.go new file mode 100644 index 0000000000..adb94be68e --- /dev/null +++ b/internal/ingress/controller/store/backend_ssl.go @@ -0,0 +1,225 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package store
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/imdario/mergo"
+	"k8s.io/klog"
+
+	apiv1 "k8s.io/api/core/v1"
+	networking "k8s.io/api/networking/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"k8s.io/ingress-nginx/internal/file"
+	"k8s.io/ingress-nginx/internal/ingress"
+	"k8s.io/ingress-nginx/internal/k8s"
+	"k8s.io/ingress-nginx/internal/net/ssl"
+)
+
+// syncSecret synchronizes the content of a TLS Secret (certificate(s), secret
+// key) with the filesystem. The resulting files can be used by NGINX.
+func (s *k8sStore) syncSecret(key string) {
+	s.syncSecretMu.Lock()
+	defer s.syncSecretMu.Unlock()
+
+	klog.V(3).Infof("Syncing Secret %q", key)
+
+	// TODO: getPemCertificate should not write to disk to avoid unnecessary overhead
+	cert, err := s.getPemCertificate(key)
+	if err != nil {
+		if !isErrSecretForAuth(err) {
+			klog.Warningf("Error obtaining X.509 certificate: %v", err)
+		}
+		return
+	}
+
+	// create certificates and add or update the item in the store
+	cur, err := s.GetLocalSSLCert(key)
+	if err == nil {
+		if cur.Equal(cert) {
+			// no need to update
+			return
+		}
+		klog.Infof("Updating Secret %q in the local store", key)
+		s.sslStore.Update(key, cert)
+		// this update must trigger an update
+		// (like an update event from a change in Ingress)
+		s.sendDummyEvent()
+		return
+	}
+
+	klog.Infof("Adding Secret %q to the local store", key)
+	s.sslStore.Add(key, cert)
+	// this update must trigger an update
+	// (like an update event from a change in Ingress)
+	s.sendDummyEvent()
+}
+
+// getPemCertificate receives a secret and returns an ingress.SSLCert.
+// It parses the secret and verifies whether it contains a keypair or only a
+// 'ca.crt' certificate.
+func (s *k8sStore) getPemCertificate(secretName string) (*ingress.SSLCert, error) {
+	secret, err := s.listers.Secret.ByKey(secretName)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, okcert := secret.Data[apiv1.TLSCertKey]
+	key, okkey := secret.Data[apiv1.TLSPrivateKeyKey]
+	ca := secret.Data["ca.crt"]
+
+	auth := secret.Data["auth"]
+
+	// namespace/secretName -> namespace-secretName
+	nsSecName := strings.Replace(secretName, "/", "-", -1)
+
+	var sslCert *ingress.SSLCert
+	if okcert && okkey {
+		if cert == nil {
+			return nil, fmt.Errorf("key 'tls.crt' missing from Secret %q", secretName)
+		}
+		if key == nil {
+			return nil, fmt.Errorf("key 'tls.key' missing from Secret %q", secretName)
+		}
+
+		sslCert, err = ssl.CreateSSLCert(cert, key)
+		if err != nil {
+			return nil, fmt.Errorf("unexpected error creating SSL Cert: %v", err)
+		}
+
+		if !s.isDynamicCertificatesEnabled || len(ca) > 0 {
+			err = ssl.StoreSSLCertOnDisk(s.filesystem, nsSecName, sslCert)
+			if err != nil {
+				return nil, fmt.Errorf("error while storing certificate and key: %v", err)
+			}
+		}
+
+		if len(ca) > 0 {
+			err = ssl.ConfigureCACertWithCertAndKey(s.filesystem, nsSecName, ca, sslCert)
+			if err != nil {
+				return nil, fmt.Errorf("error configuring CA certificate: %v", err)
+			}
+		}
+
+		msg := fmt.Sprintf("Configuring Secret %q for TLS encryption (CN: %v)", secretName, sslCert.CN)
+		if ca != nil {
+			msg += " and authentication"
+		}
+		klog.V(3).Info(msg)
+
+	} else if ca != nil && len(ca) > 0 {
+		sslCert, err = ssl.CreateCACert(ca)
+		if err != nil {
+			return nil, fmt.Errorf("unexpected error creating SSL Cert: %v", err)
+		}
+
+		err = ssl.ConfigureCACert(s.filesystem, nsSecName, ca, sslCert)
+		if err != nil {
+			return nil, fmt.Errorf("error configuring CA certificate: %v", err)
+		}
+
+		// marks this secret as usable for Certificate Authentication in 'syncSecret';
+		// note that this alone does not enable Certificate Authentication
+		klog.V(3).Infof("Configuring Secret %q for TLS authentication", secretName)
+
+	} else {
+		if auth != nil {
+			return nil, ErrSecretForAuth
+		}
+
+		return nil, fmt.Errorf("secret %q contains no keypair or CA certificate", secretName)
+	}
+
+	sslCert.Name = secret.Name
+	sslCert.Namespace = secret.Namespace
+
+	return sslCert, nil
+}
+
+func (s *k8sStore) checkSSLChainIssues() {
+	for _, item := range s.ListLocalSSLCerts() {
+		secrKey := k8s.MetaNamespaceKey(item)
+		secret, err := s.GetLocalSSLCert(secrKey)
+		if err != nil {
+			continue
+		}
+
+		if secret.FullChainPemFileName != "" {
+			// chain already checked
+			continue
+		}
+
+		data, err := ssl.FullChainCert(secret.PemFileName, s.filesystem)
+		if err != nil {
+			klog.Errorf("Error generating CA certificate chain for Secret %q: %v", secrKey, err)
+			continue
+		}
+
+		fullChainPemFileName := fmt.Sprintf("%v/%v-%v-full-chain.pem", file.DefaultSSLDirectory, secret.Namespace, secret.Name)
+
+		file, err := s.filesystem.Create(fullChainPemFileName)
+		if err != nil {
+			klog.Errorf("Error creating SSL certificate file for Secret %q: %v", secrKey, err)
+			continue
+		}
+
+		_, err = file.Write(data)
+		if err != nil {
+			klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err)
+			continue
+		}
+
+		dst := &ingress.SSLCert{}
+
+		err = mergo.MergeWithOverwrite(dst, secret)
+		if err != nil {
+			klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err)
+			continue
+		}
+
+		dst.FullChainPemFileName = fullChainPemFileName
+
+		klog.Infof("Updating local copy of SSL certificate %q with missing intermediate CA certs", secrKey)
+		s.sslStore.Update(secrKey, dst)
+		// this update must trigger an update
+		// (like an update event from a change in Ingress)
+		s.sendDummyEvent()
+	}
+}
+
+// sendDummyEvent sends a dummy event to trigger an update.
+// This is used when a Secret changes.
+func (s *k8sStore) sendDummyEvent() {
+	s.updateCh.In() <- Event{
+		Type: UpdateEvent,
+		Obj: &networking.Ingress{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "dummy",
+				Namespace: "dummy",
+			},
+		},
+	}
+}
+
+// ErrSecretForAuth error to indicate a secret is used for authentication
+var ErrSecretForAuth = fmt.Errorf("secret is used for authentication")
+
+func isErrSecretForAuth(e error) bool {
+	return e == ErrSecretForAuth
+}
diff --git a/internal/ingress/controller/backend_ssl_test.go b/internal/ingress/controller/store/backend_ssl_test.go
similarity index 92%
rename from internal/ingress/controller/backend_ssl_test.go
rename to internal/ingress/controller/store/backend_ssl_test.go
index 16892da621..5fb44c06ab 100644
--- a/internal/ingress/controller/backend_ssl_test.go
+++ b/internal/ingress/controller/store/backend_ssl_test.go
@@ -14,24 +14,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
*/ -package controller +package store import ( "encoding/base64" - "fmt" - "io/ioutil" - "testing" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" testclient "k8s.io/client-go/kubernetes/fake" cache_client "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/flowcontrol" - - "k8s.io/ingress-nginx/internal/ingress" - "k8s.io/ingress-nginx/internal/ingress/store" - "k8s.io/ingress-nginx/internal/task" - "k8s.io/kubernetes/pkg/api" ) const ( @@ -66,8 +57,8 @@ func buildSimpleClientSetForBackendSSL() *testclient.Clientset { return testclient.NewSimpleClientset() } -func buildIngListenerForBackendSSL() store.IngressLister { - ingLister := store.IngressLister{} +func buildIngListenerForBackendSSL() IngressLister { + ingLister := IngressLister{} ingLister.Store = cache_client.NewStore(cache_client.DeletionHandlingMetaNamespaceKeyFunc) return ingLister } @@ -76,25 +67,26 @@ func buildSecretForBackendSSL() *apiv1.Secret { return &apiv1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "foo_secret", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, }, } } -func buildSecrListerForBackendSSL() store.SecretLister { - secrLister := store.SecretLister{} +func buildSecrListerForBackendSSL() SecretLister { + secrLister := SecretLister{} secrLister.Store = cache_client.NewStore(cache_client.DeletionHandlingMetaNamespaceKeyFunc) return secrLister } +/* func buildListers() *ingress.StoreLister { sl := &ingress.StoreLister{} sl.Ingress.Store = buildIngListenerForBackendSSL() sl.Secret.Store = buildSecrListerForBackendSSL() return sl } - +*/ func buildControllerForBackendSSL() cache_client.Controller { cfg := &cache_client.Config{ Queue: &MockQueue{Synced: true}, @@ -103,6 +95,7 @@ func buildControllerForBackendSSL() cache_client.Controller { return cache_client.New(cfg) } +/* func buildGenericControllerForBackendSSL() *NGINXController { gc := &NGINXController{ syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(0.3, 1), @@ -110,21 +103,15 @@ func buildGenericControllerForBackendSSL() *NGINXController { Client: buildSimpleClientSetForBackendSSL(), }, listers: buildListers(), - sslCertTracker: store.NewSSLCertTracker(), + sslCertTracker: NewSSLCertTracker(), } gc.syncQueue = task.NewTaskQueue(gc.syncIngress) return gc } +*/ func buildCrtKeyAndCA() ([]byte, []byte, []byte, error) { - // prepare - td, err := ioutil.TempDir("", "ssl") - if err != nil { - return nil, nil, nil, fmt.Errorf("error occurs while creating temp directory: %v", err) - } - ingress.DefaultSSLDirectory = td - dCrt, err := base64.StdEncoding.DecodeString(tlsCrt) if err != nil { return nil, nil, nil, err @@ -140,6 +127,7 @@ func buildCrtKeyAndCA() ([]byte, []byte, []byte, error) { return dCrt, dKey, dCa, nil } +/* func TestSyncSecret(t *testing.T) { // prepare for test dCrt, dKey, dCa, err := buildCrtKeyAndCA() @@ -198,7 +186,7 @@ func TestGetPemCertificate(t *testing.T) { Data map[string][]byte eErr bool }{ - {"sceret_not_exist", "default/foo_secret_not_exist", nil, true}, + {"secret_not_exist", "default/foo_secret_not_exist", nil, true}, {"data_not_complete_all_not_exist", "default/foo_secret", map[string][]byte{}, true}, {"data_not_complete_TLSCertKey_not_exist", "default/foo_secret", map[string][]byte{api.TLSPrivateKeyKey: dKey, tlscaName: dCa}, false}, {"data_not_complete_TLSCertKeyAndCA_not_exist", "default/foo_secret", map[string][]byte{api.TLSPrivateKeyKey: dKey}, true}, @@ -232,3 +220,4 @@ func TestGetPemCertificate(t *testing.T) { }) } } +*/ diff --git 
a/internal/ingress/controller/store/configmap.go b/internal/ingress/controller/store/configmap.go new file mode 100644 index 0000000000..a57f7e7876 --- /dev/null +++ b/internal/ingress/controller/store/configmap.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" +) + +// ConfigMapLister makes a Store that lists Configmaps. +type ConfigMapLister struct { + cache.Store +} + +// ByKey returns the ConfigMap matching key in the local ConfigMap Store. +func (cml *ConfigMapLister) ByKey(key string) (*apiv1.ConfigMap, error) { + s, exists, err := cml.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, NotExistsError(key) + } + return s.(*apiv1.ConfigMap), nil +} diff --git a/internal/ingress/controller/store/endpoint.go b/internal/ingress/controller/store/endpoint.go new file mode 100644 index 0000000000..ff6fbd7150 --- /dev/null +++ b/internal/ingress/controller/store/endpoint.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" +) + +// EndpointLister makes a Store that lists Endpoints. +type EndpointLister struct { + cache.Store +} + +// ByKey returns the Endpoints of the Service matching key in the local Endpoint Store. +func (s *EndpointLister) ByKey(key string) (*apiv1.Endpoints, error) { + eps, exists, err := s.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, NotExistsError(key) + } + return eps.(*apiv1.Endpoints), nil +} diff --git a/internal/ingress/controller/store/ingress.go b/internal/ingress/controller/store/ingress.go new file mode 100644 index 0000000000..2b16468cc2 --- /dev/null +++ b/internal/ingress/controller/store/ingress.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package store + +import ( + networking "k8s.io/api/networking/v1beta1" + "k8s.io/client-go/tools/cache" +) + +// IngressLister makes a Store that lists Ingress. +type IngressLister struct { + cache.Store +} + +// ByKey returns the Ingress matching key in the local Ingress Store. +func (il IngressLister) ByKey(key string) (*networking.Ingress, error) { + i, exists, err := il.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, NotExistsError(key) + } + return i.(*networking.Ingress), nil +} diff --git a/internal/ingress/controller/store/ingress_annotation.go b/internal/ingress/controller/store/ingress_annotation.go new file mode 100644 index 0000000000..c9c596d1ed --- /dev/null +++ b/internal/ingress/controller/store/ingress_annotation.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "k8s.io/client-go/tools/cache" + "k8s.io/ingress-nginx/internal/ingress" +) + +// IngressWithAnnotationsLister makes a Store that lists Ingress rules with annotations already parsed +type IngressWithAnnotationsLister struct { + cache.Store +} + +// ByKey returns the Ingress with annotations matching key in the local store or an error +func (il IngressWithAnnotationsLister) ByKey(key string) (*ingress.Ingress, error) { + i, exists, err := il.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, NotExistsError(key) + } + return i.(*ingress.Ingress), nil +} diff --git a/internal/ingress/controller/store/local_secret.go b/internal/ingress/controller/store/local_secret.go new file mode 100644 index 0000000000..ee3ced3987 --- /dev/null +++ b/internal/ingress/controller/store/local_secret.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package store + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + + "k8s.io/ingress-nginx/internal/ingress" +) + +// SSLCertTracker holds a store of referenced Secrets in Ingress rules +type SSLCertTracker struct { + cache.ThreadSafeStore +} + +// NewSSLCertTracker creates a new SSLCertTracker store +func NewSSLCertTracker() *SSLCertTracker { + return &SSLCertTracker{ + cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}), + } +} + +// ByKey searches for an ingress in the local ingress Store +func (s SSLCertTracker) ByKey(key string) (*ingress.SSLCert, error) { + cert, exists := s.Get(key) + if !exists { + return nil, fmt.Errorf("local SSL certificate %v was not found", key) + } + return cert.(*ingress.SSLCert), nil +} diff --git a/internal/ingress/controller/store/local_secret_test.go b/internal/ingress/controller/store/local_secret_test.go new file mode 100644 index 0000000000..0b532c41df --- /dev/null +++ b/internal/ingress/controller/store/local_secret_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import "testing" + +func TestSSLCertTracker(t *testing.T) { + tracker := NewSSLCertTracker() + + items := len(tracker.List()) + if items != 0 { + t.Errorf("expected 0 items in the store but %v returned", items) + } + + tracker.Add("key", "value") + items = len(tracker.List()) + if items != 1 { + t.Errorf("expected 1 item in the store but %v returned", items) + } + + item, exists := tracker.Get("key") + if !exists || item == nil { + t.Errorf("expected an item from the store but none returned") + } +} diff --git a/internal/ingress/controller/store/objectref.go b/internal/ingress/controller/store/objectref.go new file mode 100644 index 0000000000..9ef13bf079 --- /dev/null +++ b/internal/ingress/controller/store/objectref.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ObjectRefMap is a map of references from object(s) to object (1:n). It is +// used to keep track of which data objects (Secrets) are used within Ingress +// objects. 
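+//
+// A minimal usage sketch of the 1:n semantics implemented below (the keys
+// are hypothetical):
+//
+//	m := NewObjectRefMap()
+//	m.Insert("ns/ingress-a", "ns/tls-secret")
+//	m.Insert("ns/ingress-b", "ns/tls-secret")
+//
+// Reference("ns/tls-secret") now returns both consumers, and the reference
+// only disappears once Delete has been called for each of them.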
+type ObjectRefMap interface { + Insert(consumer string, ref ...string) + Delete(consumer string) + Len() int + Has(ref string) bool + HasConsumer(consumer string) bool + Reference(ref string) []string + ReferencedBy(consumer string) []string +} + +type objectRefMap struct { + sync.Mutex + v map[string]sets.String +} + +// NewObjectRefMap returns a new ObjectRefMap. +func NewObjectRefMap() ObjectRefMap { + return &objectRefMap{ + v: make(map[string]sets.String), + } +} + +// Insert adds a consumer to one or more referenced objects. +func (o *objectRefMap) Insert(consumer string, ref ...string) { + o.Lock() + defer o.Unlock() + + for _, r := range ref { + if _, ok := o.v[r]; !ok { + o.v[r] = sets.NewString(consumer) + continue + } + o.v[r].Insert(consumer) + } +} + +// Delete deletes a consumer from all referenced objects. +func (o *objectRefMap) Delete(consumer string) { + o.Lock() + defer o.Unlock() + + for ref, consumers := range o.v { + consumers.Delete(consumer) + if consumers.Len() == 0 { + delete(o.v, ref) + } + } +} + +// Len returns the count of referenced objects. +func (o *objectRefMap) Len() int { + return len(o.v) +} + +// Has returns whether the given object is referenced by any other object. +func (o *objectRefMap) Has(ref string) bool { + o.Lock() + defer o.Unlock() + + if _, ok := o.v[ref]; ok { + return true + } + return false +} + +// HasConsumer returns whether the store contains the given consumer. +func (o *objectRefMap) HasConsumer(consumer string) bool { + o.Lock() + defer o.Unlock() + + for _, consumers := range o.v { + if consumers.Has(consumer) { + return true + } + } + return false +} + +// Reference returns all objects referencing the given object. +func (o *objectRefMap) Reference(ref string) []string { + o.Lock() + defer o.Unlock() + + consumers, ok := o.v[ref] + if !ok { + return make([]string, 0) + } + return consumers.List() +} + +// ReferencedBy returns all objects referenced by the given object. +func (o *objectRefMap) ReferencedBy(consumer string) []string { + o.Lock() + defer o.Unlock() + + refs := make([]string, 0) + for ref, consumers := range o.v { + if consumers.Has(consumer) { + refs = append(refs, ref) + } + } + return refs +} diff --git a/internal/ingress/controller/store/objectref_test.go b/internal/ingress/controller/store/objectref_test.go new file mode 100644 index 0000000000..06eed4282e --- /dev/null +++ b/internal/ingress/controller/store/objectref_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import "testing" + +func TestObjectRefMapOperations(t *testing.T) { + orm := NewObjectRefMap() + + items := []struct { + consumer string + ref []string + }{ + {"ns/ingress1", []string{"ns/tls1"}}, + {"ns/ingress2", []string{"ns/tls1", "ns/tls2"}}, + {"ns/ingress3", []string{"ns/tls1", "ns/tls2", "ns/tls3"}}, + } + + // populate map with test data + for _, i := range items { + orm.Insert(i.consumer, i.ref...) 
+ } + if l := orm.Len(); l != 3 { + t.Fatalf("Expected 3 referenced objects (got %d)", l) + } + + // add already existing item + orm.Insert("ns/ingress1", "ns/tls1") + if l := len(orm.ReferencedBy("ns/ingress1")); l != 1 { + t.Error("Expected existing item not to be added again") + } + + // find consumer by name + if !orm.HasConsumer("ns/ingress1") { + t.Error("Expected the \"ns/ingress1\" consumer to exist in the map") + } + + // count references to object + if l := len(orm.Reference("ns/tls1")); l != 3 { + t.Errorf("Expected \"ns/tls1\" to be referenced by 3 objects (got %d)", l) + } + + // delete consumer + orm.Delete("ns/ingress3") + if l := orm.Len(); l != 2 { + t.Errorf("Expected 2 referenced objects (got %d)", l) + } + if orm.Has("ns/tls3") { + t.Error("Expected \"ns/tls3\" not to be referenced") + } +} diff --git a/internal/ingress/controller/store/pod.go b/internal/ingress/controller/store/pod.go new file mode 100644 index 0000000000..1a437d7692 --- /dev/null +++ b/internal/ingress/controller/store/pod.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "k8s.io/client-go/tools/cache" +) + +// PodLister makes a Store that lists Pods. +type PodLister struct { + cache.Store +} diff --git a/internal/ingress/controller/store/secret.go b/internal/ingress/controller/store/secret.go new file mode 100644 index 0000000000..c749155ede --- /dev/null +++ b/internal/ingress/controller/store/secret.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" +) + +// SecretLister makes a Store that lists Secrets. +type SecretLister struct { + cache.Store +} + +// ByKey returns the Secret matching key in the local Secret Store. +func (sl *SecretLister) ByKey(key string) (*apiv1.Secret, error) { + s, exists, err := sl.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + return nil, NotExistsError(key) + } + return s.(*apiv1.Secret), nil +} diff --git a/internal/ingress/controller/store/service.go b/internal/ingress/controller/store/service.go new file mode 100644 index 0000000000..7e47319764 --- /dev/null +++ b/internal/ingress/controller/store/service.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package store
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/cache"
+)
+
+// ServiceLister makes a Store that lists Services.
+type ServiceLister struct {
+	cache.Store
+}
+
+// ByKey returns the Service matching key in the local Service Store.
+func (sl *ServiceLister) ByKey(key string) (*apiv1.Service, error) {
+	s, exists, err := sl.GetByKey(key)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, NotExistsError(key)
+	}
+	return s.(*apiv1.Service), nil
+}
diff --git a/internal/ingress/controller/store/store.go b/internal/ingress/controller/store/store.go
new file mode 100644
index 0000000000..01776e3de6
--- /dev/null
+++ b/internal/ingress/controller/store/store.go
@@ -0,0 +1,939 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package store
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"sort"
+	"sync"
+	"time"
+
+	"k8s.io/klog"
+
+	"github.com/eapache/channels"
+	corev1 "k8s.io/api/core/v1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1beta1 "k8s.io/api/networking/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	k8sruntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/informers"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+
+	"k8s.io/ingress-nginx/internal/file"
+	"k8s.io/ingress-nginx/internal/ingress"
+	"k8s.io/ingress-nginx/internal/ingress/annotations"
+	"k8s.io/ingress-nginx/internal/ingress/annotations/class"
+	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
+	ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
+	ngx_template "k8s.io/ingress-nginx/internal/ingress/controller/template"
+	"k8s.io/ingress-nginx/internal/ingress/defaults"
+	"k8s.io/ingress-nginx/internal/ingress/errors"
+	"k8s.io/ingress-nginx/internal/ingress/resolver"
+	"k8s.io/ingress-nginx/internal/k8s"
+)
+
+// IngressFilterFunc decides if an Ingress should be omitted or not
+type IngressFilterFunc func(*ingress.Ingress) bool
+
+// Storer is the interface that wraps the required methods to gather information
+// about ingresses, services, secrets and ingress annotations.
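+//
+// A sketch of typical read-only access, assuming a Storer instance named
+// 'store' (the variable name is illustrative):
+//
+//	svc, err := store.GetService("namespace/name")
+//	if err != nil {
+//		// the Service is not present in the local cache
+//	}
+//	cfg := store.GetBackendConfiguration()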
+type Storer interface { + // GetBackendConfiguration returns the nginx configuration stored in a configmap + GetBackendConfiguration() ngx_config.Configuration + + // GetConfigMap returns the ConfigMap matching key. + GetConfigMap(key string) (*corev1.ConfigMap, error) + + // GetSecret returns the Secret matching key. + GetSecret(key string) (*corev1.Secret, error) + + // GetService returns the Service matching key. + GetService(key string) (*corev1.Service, error) + + // GetServiceEndpoints returns the Endpoints of a Service matching key. + GetServiceEndpoints(key string) (*corev1.Endpoints, error) + + // ListIngresses returns a list of all Ingresses in the store. + ListIngresses(IngressFilterFunc) []*ingress.Ingress + + // GetRunningControllerPodsCount returns the number of Running ingress-nginx controller Pods. + GetRunningControllerPodsCount() int + + // GetLocalSSLCert returns the local copy of a SSLCert + GetLocalSSLCert(name string) (*ingress.SSLCert, error) + + // ListLocalSSLCerts returns the list of local SSLCerts + ListLocalSSLCerts() []*ingress.SSLCert + + // GetAuthCertificate resolves a given secret name into an SSL certificate. + // The secret must contain 3 keys named: + // ca.crt: contains the certificate chain used for authentication + GetAuthCertificate(string) (*resolver.AuthSSLCert, error) + + // GetDefaultBackend returns the default backend configuration + GetDefaultBackend() defaults.Backend + + // Run initiates the synchronization of the controllers + Run(stopCh chan struct{}) +} + +// EventType type of event associated with an informer +type EventType string + +const ( + // CreateEvent event associated with new objects in an informer + CreateEvent EventType = "CREATE" + // UpdateEvent event associated with an object update in an informer + UpdateEvent EventType = "UPDATE" + // DeleteEvent event associated when an object is removed from an informer + DeleteEvent EventType = "DELETE" + // ConfigurationEvent event associated when a controller configuration object is created or updated + ConfigurationEvent EventType = "CONFIGURATION" +) + +// Event holds the context of an event. +type Event struct { + Type EventType + Obj interface{} +} + +// Informer defines the required SharedIndexInformers that interact with the API server. +type Informer struct { + Ingress cache.SharedIndexInformer + Endpoint cache.SharedIndexInformer + Service cache.SharedIndexInformer + Secret cache.SharedIndexInformer + ConfigMap cache.SharedIndexInformer + Pod cache.SharedIndexInformer +} + +// Lister contains object listers (stores). +type Lister struct { + Ingress IngressLister + Service ServiceLister + Endpoint EndpointLister + Secret SecretLister + ConfigMap ConfigMapLister + IngressWithAnnotation IngressWithAnnotationsLister + Pod PodLister +} + +// NotExistsError is returned when an object does not exist in a local store. +type NotExistsError string + +// Error implements the error interface. +func (e NotExistsError) Error() string { + return fmt.Sprintf("no object matching key %q in local store", string(e)) +} + +// Run initiates the synchronization of the informers against the API server. 
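+// The secondary informers are started and synced before the Ingress informer,
+// because parsing Ingress annotations depends on data from their caches.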
+func (i *Informer) Run(stopCh chan struct{}) { + go i.Endpoint.Run(stopCh) + go i.Service.Run(stopCh) + go i.Secret.Run(stopCh) + go i.ConfigMap.Run(stopCh) + go i.Pod.Run(stopCh) + + // wait for all involved caches to be synced before processing items + // from the queue + if !cache.WaitForCacheSync(stopCh, + i.Endpoint.HasSynced, + i.Service.HasSynced, + i.Secret.HasSynced, + i.ConfigMap.HasSynced, + ) { + runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync")) + } + + // in big clusters, deltas can keep arriving even after HasSynced + // functions have returned 'true' + time.Sleep(1 * time.Second) + + // we can start syncing ingress objects only after other caches are + // ready, because ingress rules require content from other listers, and + // 'add' events get triggered in the handlers during caches population. + go i.Ingress.Run(stopCh) + if !cache.WaitForCacheSync(stopCh, + i.Ingress.HasSynced, + ) { + runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync")) + } +} + +// k8sStore internal Storer implementation using informers and thread safe stores +type k8sStore struct { + isOCSPCheckEnabled bool + + // backendConfig contains the running configuration from the configmap + // this is required because this rarely changes but is a very expensive + // operation to execute in each OnUpdate invocation + backendConfig ngx_config.Configuration + + // informer contains the cache Informers + informers *Informer + + // listers contains the cache.Store interfaces used in the ingress controller + listers *Lister + + // sslStore local store of SSL certificates (certificates used in ingress) + // this is required because the certificates must be present in the + // container filesystem + sslStore *SSLCertTracker + + annotations annotations.Extractor + + // secretIngressMap contains information about which ingress references a + // secret in the annotations. 
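+	// Keys are Secret keys ("namespace/name"); values are the Ingresses
+	// consuming them (see ObjectRefMap).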
+ secretIngressMap ObjectRefMap + + filesystem file.Filesystem + + // updateCh + updateCh *channels.RingChannel + + // syncSecretMu protects against simultaneous invocations of syncSecret + syncSecretMu *sync.Mutex + + // backendConfigMu protects against simultaneous read/write of backendConfig + backendConfigMu *sync.RWMutex + + defaultSSLCertificate string + + isDynamicCertificatesEnabled bool + + pod *k8s.PodInfo +} + +// New creates a new object store to be used in the ingress controller +func New(checkOCSP bool, + namespace, configmap, tcp, udp, defaultSSLCertificate string, + resyncPeriod time.Duration, + client clientset.Interface, + fs file.Filesystem, + updateCh *channels.RingChannel, + isDynamicCertificatesEnabled bool, + pod *k8s.PodInfo, + disableCatchAll bool) Storer { + + store := &k8sStore{ + isOCSPCheckEnabled: checkOCSP, + informers: &Informer{}, + listers: &Lister{}, + sslStore: NewSSLCertTracker(), + filesystem: fs, + updateCh: updateCh, + backendConfig: ngx_config.NewDefault(), + syncSecretMu: &sync.Mutex{}, + backendConfigMu: &sync.RWMutex{}, + secretIngressMap: NewObjectRefMap(), + defaultSSLCertificate: defaultSSLCertificate, + isDynamicCertificatesEnabled: isDynamicCertificatesEnabled, + pod: pod, + } + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{ + Interface: client.CoreV1().Events(namespace), + }) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{ + Component: "nginx-ingress-controller", + }) + + // k8sStore fulfills resolver.Resolver interface + store.annotations = annotations.NewAnnotationExtractor(store) + + store.listers.IngressWithAnnotation.Store = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + + // create informers factory, enable and assign required informers + infFactory := informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod, + informers.WithNamespace(namespace), + informers.WithTweakListOptions(func(*metav1.ListOptions) {})) + + if k8s.IsNetworkingIngressAvailable { + store.informers.Ingress = infFactory.Networking().V1beta1().Ingresses().Informer() + } else { + store.informers.Ingress = infFactory.Extensions().V1beta1().Ingresses().Informer() + } + + store.listers.Ingress.Store = store.informers.Ingress.GetStore() + + store.informers.Endpoint = infFactory.Core().V1().Endpoints().Informer() + store.listers.Endpoint.Store = store.informers.Endpoint.GetStore() + + store.informers.Secret = infFactory.Core().V1().Secrets().Informer() + store.listers.Secret.Store = store.informers.Secret.GetStore() + + store.informers.ConfigMap = infFactory.Core().V1().ConfigMaps().Informer() + store.listers.ConfigMap.Store = store.informers.ConfigMap.GetStore() + + store.informers.Service = infFactory.Core().V1().Services().Informer() + store.listers.Service.Store = store.informers.Service.GetStore() + + labelSelector := labels.SelectorFromSet(store.pod.Labels) + store.informers.Pod = cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (k8sruntime.Object, error) { + options.LabelSelector = labelSelector.String() + return client.CoreV1().Pods(store.pod.Namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labelSelector.String() + return client.CoreV1().Pods(store.pod.Namespace).Watch(options) + }, + }, + &corev1.Pod{}, + resyncPeriod, + cache.Indexers{}, + ) + store.listers.Pod.Store = 
store.informers.Pod.GetStore() + + ingDeleteHandler := func(obj interface{}) { + ing, ok := toIngress(obj) + if !ok { + // If we reached here it means the ingress was deleted but its final state is unrecorded. + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + klog.Errorf("couldn't get object from tombstone %#v", obj) + return + } + ing, ok = tombstone.Obj.(*networkingv1beta1.Ingress) + if !ok { + klog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) + return + } + } + + if !class.IsValid(ing) { + klog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey) + return + } + if isCatchAllIngress(ing.Spec) && disableCatchAll { + klog.Infof("ignoring delete for catch-all ingress %v/%v because of --disable-catch-all", ing.Namespace, ing.Name) + return + } + recorder.Eventf(ing, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) + + store.listers.IngressWithAnnotation.Delete(ing) + + key := k8s.MetaNamespaceKey(ing) + store.secretIngressMap.Delete(key) + + updateCh.In() <- Event{ + Type: DeleteEvent, + Obj: obj, + } + } + + ingEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + ing, _ := toIngress(obj) + if !class.IsValid(ing) { + a, _ := parser.GetStringAnnotation(class.IngressKey, ing) + klog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) + return + } + if isCatchAllIngress(ing.Spec) && disableCatchAll { + klog.Infof("ignoring add for catch-all ingress %v/%v because of --disable-catch-all", ing.Namespace, ing.Name) + return + } + recorder.Eventf(ing, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) + + store.syncIngress(ing) + store.updateSecretIngressMap(ing) + store.syncSecrets(ing) + + updateCh.In() <- Event{ + Type: CreateEvent, + Obj: obj, + } + }, + DeleteFunc: ingDeleteHandler, + UpdateFunc: func(old, cur interface{}) { + oldIng, _ := toIngress(old) + curIng, _ := toIngress(cur) + + validOld := class.IsValid(oldIng) + validCur := class.IsValid(curIng) + if !validOld && validCur { + if isCatchAllIngress(curIng.Spec) && disableCatchAll { + klog.Infof("ignoring update for catch-all ingress %v/%v because of --disable-catch-all", curIng.Namespace, curIng.Name) + return + } + + klog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey) + recorder.Eventf(curIng, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) + } else if validOld && !validCur { + klog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey) + ingDeleteHandler(old) + return + } else if validCur && !reflect.DeepEqual(old, cur) { + if isCatchAllIngress(curIng.Spec) && disableCatchAll { + klog.Infof("ignoring update for catch-all ingress %v/%v and delete old one because of --disable-catch-all", curIng.Namespace, curIng.Name) + ingDeleteHandler(old) + return + } + + recorder.Eventf(curIng, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) + } else { + klog.V(3).Infof("No changes on ingress %v/%v. 
Skipping update", curIng.Namespace, curIng.Name) + return + } + + store.syncIngress(curIng) + store.updateSecretIngressMap(curIng) + store.syncSecrets(curIng) + + updateCh.In() <- Event{ + Type: UpdateEvent, + Obj: cur, + } + }, + } + + secrEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + sec := obj.(*corev1.Secret) + key := k8s.MetaNamespaceKey(sec) + + if store.defaultSSLCertificate == key { + store.syncSecret(store.defaultSSLCertificate) + } + + // find references in ingresses and update local ssl certs + if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { + klog.Infof("secret %v was added and it is used in ingress annotations. Parsing...", key) + for _, ingKey := range ings { + ing, err := store.getIngress(ingKey) + if err != nil { + klog.Errorf("could not find Ingress %v in local store", ingKey) + continue + } + store.syncIngress(ing) + store.syncSecrets(ing) + } + updateCh.In() <- Event{ + Type: CreateEvent, + Obj: obj, + } + } + }, + UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + sec := cur.(*corev1.Secret) + key := k8s.MetaNamespaceKey(sec) + + if store.defaultSSLCertificate == key { + store.syncSecret(store.defaultSSLCertificate) + } + + // find references in ingresses and update local ssl certs + if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { + klog.Infof("secret %v was updated and it is used in ingress annotations. Parsing...", key) + for _, ingKey := range ings { + ing, err := store.getIngress(ingKey) + if err != nil { + klog.Errorf("could not find Ingress %v in local store", ingKey) + continue + } + store.syncIngress(ing) + store.syncSecrets(ing) + } + updateCh.In() <- Event{ + Type: UpdateEvent, + Obj: cur, + } + } + } + }, + DeleteFunc: func(obj interface{}) { + sec, ok := obj.(*corev1.Secret) + if !ok { + // If we reached here it means the secret was deleted but its final state is unrecorded. + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + klog.Errorf("couldn't get object from tombstone %#v", obj) + return + } + sec, ok = tombstone.Obj.(*corev1.Secret) + if !ok { + klog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) + return + } + } + + store.sslStore.Delete(k8s.MetaNamespaceKey(sec)) + + key := k8s.MetaNamespaceKey(sec) + + // find references in ingresses + if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { + klog.Infof("secret %v was deleted and it is used in ingress annotations. 
Parsing...", key) + for _, ingKey := range ings { + ing, err := store.getIngress(ingKey) + if err != nil { + klog.Errorf("could not find Ingress %v in local store", ingKey) + continue + } + store.syncIngress(ing) + } + updateCh.In() <- Event{ + Type: DeleteEvent, + Obj: obj, + } + } + }, + } + + epEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + updateCh.In() <- Event{ + Type: CreateEvent, + Obj: obj, + } + }, + DeleteFunc: func(obj interface{}) { + updateCh.In() <- Event{ + Type: DeleteEvent, + Obj: obj, + } + }, + UpdateFunc: func(old, cur interface{}) { + oep := old.(*corev1.Endpoints) + cep := cur.(*corev1.Endpoints) + if !reflect.DeepEqual(cep.Subsets, oep.Subsets) { + updateCh.In() <- Event{ + Type: UpdateEvent, + Obj: cur, + } + } + }, + } + + cmEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + cm := obj.(*corev1.ConfigMap) + key := k8s.MetaNamespaceKey(cm) + // updates to configuration configmaps can trigger an update + if key == configmap || key == tcp || key == udp { + recorder.Eventf(cm, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("ConfigMap %v", key)) + if key == configmap { + store.setConfig(cm) + } + updateCh.In() <- Event{ + Type: ConfigurationEvent, + Obj: obj, + } + } + }, + UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + cm := cur.(*corev1.ConfigMap) + key := k8s.MetaNamespaceKey(cm) + // updates to configuration configmaps can trigger an update + if key == configmap || key == tcp || key == udp { + recorder.Eventf(cm, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", key)) + if key == configmap { + store.setConfig(cm) + } + + ings := store.listers.IngressWithAnnotation.List() + for _, ingKey := range ings { + key := k8s.MetaNamespaceKey(ingKey) + ing, err := store.getIngress(key) + if err != nil { + klog.Errorf("could not find Ingress %v in local store: %v", key, err) + continue + } + store.syncIngress(ing) + } + + updateCh.In() <- Event{ + Type: ConfigurationEvent, + Obj: cur, + } + } + } + }, + } + + podEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + updateCh.In() <- Event{ + Type: CreateEvent, + Obj: obj, + } + }, + UpdateFunc: func(old, cur interface{}) { + oldPod := old.(*corev1.Pod) + curPod := cur.(*corev1.Pod) + + if oldPod.Status.Phase == curPod.Status.Phase { + return + } + + updateCh.In() <- Event{ + Type: UpdateEvent, + Obj: cur, + } + }, + DeleteFunc: func(obj interface{}) { + updateCh.In() <- Event{ + Type: DeleteEvent, + Obj: obj, + } + }, + } + + store.informers.Ingress.AddEventHandler(ingEventHandler) + store.informers.Endpoint.AddEventHandler(epEventHandler) + store.informers.Secret.AddEventHandler(secrEventHandler) + store.informers.ConfigMap.AddEventHandler(cmEventHandler) + store.informers.Service.AddEventHandler(cache.ResourceEventHandlerFuncs{}) + store.informers.Pod.AddEventHandler(podEventHandler) + + // do not wait for informers to read the configmap configuration + ns, name, _ := k8s.ParseNameNS(configmap) + cm, err := client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}) + if err != nil { + klog.Warningf("Unexpected error reading configuration configmap: %v", err) + } + + store.setConfig(cm) + return store +} + +// isCatchAllIngress returns whether or not an ingress produces a +// catch-all server, and so should be ignored when --disable-catch-all is set +func isCatchAllIngress(spec networkingv1beta1.IngressSpec) bool { + return spec.Backend != nil && len(spec.Rules) == 0 +} + +// 
syncIngress parses ingress annotations converting the value of the +// annotation to a go struct +func (s *k8sStore) syncIngress(ing *networkingv1beta1.Ingress) { + key := k8s.MetaNamespaceKey(ing) + klog.V(3).Infof("updating annotations information for ingress %v", key) + + copyIng := &networkingv1beta1.Ingress{} + ing.ObjectMeta.DeepCopyInto(©Ing.ObjectMeta) + ing.Spec.DeepCopyInto(©Ing.Spec) + ing.Status.DeepCopyInto(©Ing.Status) + + for ri, rule := range copyIng.Spec.Rules { + if rule.HTTP == nil { + continue + } + + for pi, path := range rule.HTTP.Paths { + if path.Path == "" { + copyIng.Spec.Rules[ri].HTTP.Paths[pi].Path = "/" + } + } + } + + err := s.listers.IngressWithAnnotation.Update(&ingress.Ingress{ + Ingress: *copyIng, + ParsedAnnotations: s.annotations.Extract(ing), + }) + if err != nil { + klog.Error(err) + } +} + +// updateSecretIngressMap takes an Ingress and updates all Secret objects it +// references in secretIngressMap. +func (s *k8sStore) updateSecretIngressMap(ing *networkingv1beta1.Ingress) { + key := k8s.MetaNamespaceKey(ing) + klog.V(3).Infof("updating references to secrets for ingress %v", key) + + // delete all existing references first + s.secretIngressMap.Delete(key) + + var refSecrets []string + + for _, tls := range ing.Spec.TLS { + secrName := tls.SecretName + if secrName != "" { + secrKey := fmt.Sprintf("%v/%v", ing.Namespace, secrName) + refSecrets = append(refSecrets, secrKey) + } + } + + // We can not rely on cached ingress annotations because these are + // discarded when the referenced secret does not exist in the local + // store. As a result, adding a secret *after* the ingress(es) which + // references it would not trigger a resync of that secret. + secretAnnotations := []string{ + "auth-secret", + "auth-tls-secret", + } + for _, ann := range secretAnnotations { + secrKey, err := objectRefAnnotationNsKey(ann, ing) + if err != nil && !errors.IsMissingAnnotations(err) { + klog.Errorf("error reading secret reference in annotation %q: %s", ann, err) + continue + } + if secrKey != "" { + refSecrets = append(refSecrets, secrKey) + } + } + + // populate map with all secret references + s.secretIngressMap.Insert(key, refSecrets...) +} + +// objectRefAnnotationNsKey returns an object reference formatted as a +// 'namespace/name' key from the given annotation name. +func objectRefAnnotationNsKey(ann string, ing *networkingv1beta1.Ingress) (string, error) { + annValue, err := parser.GetStringAnnotation(ann, ing) + if err != nil { + return "", err + } + + secrNs, secrName, err := cache.SplitMetaNamespaceKey(annValue) + if secrName == "" { + return "", err + } + + if secrNs == "" { + return fmt.Sprintf("%v/%v", ing.Namespace, secrName), nil + } + return annValue, nil +} + +// syncSecrets synchronizes data from all Secrets referenced by the given +// Ingress with the local store and file system. +func (s *k8sStore) syncSecrets(ing *networkingv1beta1.Ingress) { + key := k8s.MetaNamespaceKey(ing) + for _, secrKey := range s.secretIngressMap.ReferencedBy(key) { + s.syncSecret(secrKey) + } +} + +// GetSecret returns the Secret matching key. 
+func (s *k8sStore) GetSecret(key string) (*corev1.Secret, error) { + return s.listers.Secret.ByKey(key) +} + +// ListLocalSSLCerts returns the list of local SSLCerts +func (s *k8sStore) ListLocalSSLCerts() []*ingress.SSLCert { + var certs []*ingress.SSLCert + for _, item := range s.sslStore.List() { + if s, ok := item.(*ingress.SSLCert); ok { + certs = append(certs, s) + } + } + + return certs +} + +// GetService returns the Service matching key. +func (s *k8sStore) GetService(key string) (*corev1.Service, error) { + return s.listers.Service.ByKey(key) +} + +// getIngress returns the Ingress matching key. +func (s *k8sStore) getIngress(key string) (*networkingv1beta1.Ingress, error) { + ing, err := s.listers.IngressWithAnnotation.ByKey(key) + if err != nil { + return nil, err + } + + return &ing.Ingress, nil +} + +// ListIngresses returns the list of Ingresses +func (s *k8sStore) ListIngresses(filter IngressFilterFunc) []*ingress.Ingress { + // filter ingress rules + ingresses := make([]*ingress.Ingress, 0) + for _, item := range s.listers.IngressWithAnnotation.List() { + ing := item.(*ingress.Ingress) + + if filter != nil && filter(ing) { + continue + } + + ingresses = append(ingresses, ing) + } + + // sort Ingresses using the CreationTimestamp field + sort.SliceStable(ingresses, func(i, j int) bool { + ir := ingresses[i].CreationTimestamp + jr := ingresses[j].CreationTimestamp + return ir.Before(&jr) + }) + + return ingresses +} + +// GetLocalSSLCert returns the local copy of a SSLCert +func (s *k8sStore) GetLocalSSLCert(key string) (*ingress.SSLCert, error) { + return s.sslStore.ByKey(key) +} + +// GetConfigMap returns the ConfigMap matching key. +func (s *k8sStore) GetConfigMap(key string) (*corev1.ConfigMap, error) { + return s.listers.ConfigMap.ByKey(key) +} + +// GetServiceEndpoints returns the Endpoints of a Service matching key. 
+func (s *k8sStore) GetServiceEndpoints(key string) (*corev1.Endpoints, error) { + return s.listers.Endpoint.ByKey(key) +} + +// GetAuthCertificate is used by the auth-tls annotations to get a cert from a secret +func (s *k8sStore) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) { + if _, err := s.GetLocalSSLCert(name); err != nil { + s.syncSecret(name) + } + + cert, err := s.GetLocalSSLCert(name) + if err != nil { + return nil, err + } + + return &resolver.AuthSSLCert{ + Secret: name, + CAFileName: cert.CAFileName, + PemSHA: cert.PemSHA, + }, nil +} + +func (s *k8sStore) writeSSLSessionTicketKey(cmap *corev1.ConfigMap, fileName string) { + ticketString := ngx_template.ReadConfig(cmap.Data).SSLSessionTicketKey + s.backendConfig.SSLSessionTicketKey = "" + + if ticketString != "" { + ticketBytes := base64.StdEncoding.WithPadding(base64.StdPadding).DecodedLen(len(ticketString)) + + // 81 used instead of 80 because of padding + if !(ticketBytes == 48 || ticketBytes == 81) { + klog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes") + } + + decodedTicket, err := base64.StdEncoding.DecodeString(ticketString) + if err != nil { + klog.Errorf("unexpected error decoding ssl-session-ticket-key: %v", err) + return + } + + err = ioutil.WriteFile(fileName, decodedTicket, file.ReadWriteByUser) + if err != nil { + klog.Errorf("unexpected error writing ssl-session-ticket-key to %s: %v", fileName, err) + return + } + + s.backendConfig.SSLSessionTicketKey = ticketString + } +} + +// GetDefaultBackend returns the default backend +func (s *k8sStore) GetDefaultBackend() defaults.Backend { + return s.GetBackendConfiguration().Backend +} + +func (s *k8sStore) GetBackendConfiguration() ngx_config.Configuration { + s.backendConfigMu.RLock() + defer s.backendConfigMu.RUnlock() + + return s.backendConfig +} + +func (s *k8sStore) setConfig(cmap *corev1.ConfigMap) { + s.backendConfigMu.Lock() + defer s.backendConfigMu.Unlock() + + s.backendConfig = ngx_template.ReadConfig(cmap.Data) + s.writeSSLSessionTicketKey(cmap, "/etc/nginx/tickets.key") +} + +// Run initiates the synchronization of the informers and the initial +// synchronization of the secrets. 
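+// When OCSP checking is enabled it also starts a loop that looks for
+// certificate chains with missing intermediate CA certificates every
+// 60 seconds (see checkSSLChainIssues).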
+func (s *k8sStore) Run(stopCh chan struct{}) { + // start informers + s.informers.Run(stopCh) + + if s.isOCSPCheckEnabled { + go wait.Until(s.checkSSLChainIssues, 60*time.Second, stopCh) + } +} + +// GetRunningControllerPodsCount returns the number of Running ingress-nginx controller Pods +func (s k8sStore) GetRunningControllerPodsCount() int { + count := 0 + + for _, i := range s.listers.Pod.List() { + pod := i.(*corev1.Pod) + + if pod.Status.Phase != corev1.PodRunning { + continue + } + + count++ + } + + return count +} + +var runtimeScheme = k8sruntime.NewScheme() + +func init() { + extensionsv1beta1.AddToScheme(runtimeScheme) + networkingv1beta1.AddToScheme(runtimeScheme) +} + +func fromExtensions(old *extensionsv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { + networkingIngress := &networkingv1beta1.Ingress{} + + err := runtimeScheme.Convert(old, networkingIngress, nil) + if err != nil { + return nil, err + } + + return networkingIngress, nil +} + +func toIngress(obj interface{}) (*networkingv1beta1.Ingress, bool) { + oldVersion, inExtension := obj.(*extensionsv1beta1.Ingress) + if inExtension { + ing, err := fromExtensions(oldVersion) + if err != nil { + klog.Errorf("unexpected error converting Ingress from extensions package: %v", err) + return nil, false + } + + return ing, true + } + + if ing, ok := obj.(*networkingv1beta1.Ingress); ok { + return ing, true + } + + return nil, false +} diff --git a/internal/ingress/controller/store/store_test.go b/internal/ingress/controller/store/store_test.go new file mode 100644 index 0000000000..d0adb660c6 --- /dev/null +++ b/internal/ingress/controller/store/store_test.go @@ -0,0 +1,1180 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "bytes" + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/eapache/channels" + v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + "k8s.io/ingress-nginx/internal/file" + "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/k8s" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +func TestStore(t *testing.T) { + k8s.IsNetworkingIngressAvailable = true + + pod := &k8s.PodInfo{ + Name: "testpod", + Namespace: v1.NamespaceDefault, + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + } + + //TODO: move env definition to docker image? 
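+	// KUBEBUILDER_ASSETS points controller-runtime's envtest at the directory
+	// holding the etcd and kube-apiserver test binaries; /usr/local/bin is
+	// assumed to contain them here.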
+ os.Setenv("KUBEBUILDER_ASSETS", "/usr/local/bin") + + te := &envtest.Environment{} + cfg, err := te.Start() + if err != nil { + t.Fatalf("error: %v", err) + } + + defer te.Stop() + + clientSet, err := kubernetes.NewForConfig(cfg) + if err != nil { + t.Fatalf("error: %v", err) + } + + t.Run("should return an error searching for non existing objects", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + go func(ch *channels.RingChannel) { + for { + <-ch.Out() + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + key := fmt.Sprintf("%v/anything", ns) + ls, err := storer.GetLocalSSLCert(key) + if err == nil { + t.Errorf("expected an error but none returned") + } + if ls != nil { + t.Errorf("expected an Ingres but none returned") + } + + s, err := storer.GetSecret(key) + if err == nil { + t.Errorf("expected an error but none returned") + } + if s != nil { + t.Errorf("expected an Ingres but none returned") + } + + svc, err := storer.GetService(key) + if err == nil { + t.Errorf("expected an error but none returned") + } + if svc != nil { + t.Errorf("expected an Ingres but none returned") + } + }) + + t.Run("should return one event for add, update and delete of ingress", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + var add uint64 + var upd uint64 + var del uint64 + + go func(ch *channels.RingChannel) { + for { + evt, ok := <-ch.Out() + if !ok { + return + } + + e := evt.(Event) + if e.Obj == nil { + continue + } + if _, ok := e.Obj.(*networking.Ingress); !ok { + continue + } + + switch e.Type { + case CreateEvent: + atomic.AddUint64(&add, 1) + case UpdateEvent: + atomic.AddUint64(&upd, 1) + case DeleteEvent: + atomic.AddUint64(&del, 1) + } + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + ing := ensureIngress(&networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Namespace: ns, + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "dummy", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, clientSet, t) + + err := framework.WaitForIngressInNamespace(clientSet, ns, ing.Name) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + time.Sleep(1 * time.Second) + + // create an invalid ingress (different class) + invalidIngress := ensureIngress(&networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-class", + Namespace: ns, + Annotations: map[string]string{ + "kubernetes.io/ingress.class": "something", + }, + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "dummy", + IngressRuleValue: 
networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, clientSet, t) + defer deleteIngress(invalidIngress, clientSet, t) + + ni := ing.DeepCopy() + ni.Spec.Rules[0].Host = "update-dummy" + _ = ensureIngress(ni, clientSet, t) + if err != nil { + t.Errorf("error creating ingress: %v", err) + } + // Secret takes a bit to update + time.Sleep(3 * time.Second) + + err = clientSet.NetworkingV1beta1().Ingresses(ni.Namespace).Delete(ni.Name, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("error creating ingress: %v", err) + } + + err = framework.WaitForNoIngressInNamespace(clientSet, ni.Namespace, ni.Name) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + time.Sleep(1 * time.Second) + + if atomic.LoadUint64(&add) != 1 { + t.Errorf("expected 1 event of type Create but %v occurred", add) + } + if atomic.LoadUint64(&upd) != 1 { + t.Errorf("expected 1 event of type Update but %v occurred", upd) + } + if atomic.LoadUint64(&del) != 1 { + t.Errorf("expected 1 event of type Delete but %v occurred", del) + } + }) + + t.Run("should not receive updates for ingress with invalid class", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + var add uint64 + var upd uint64 + var del uint64 + + go func(ch *channels.RingChannel) { + for { + evt, ok := <-ch.Out() + if !ok { + return + } + + e := evt.(Event) + if e.Obj == nil { + continue + } + if _, ok := e.Obj.(*networking.Ingress); !ok { + continue + } + + switch e.Type { + case CreateEvent: + atomic.AddUint64(&add, 1) + case UpdateEvent: + atomic.AddUint64(&upd, 1) + case DeleteEvent: + atomic.AddUint64(&del, 1) + } + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + // create an invalid ingress (different class) + invalidIngress := ensureIngress(&networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-class", + Namespace: ns, + Annotations: map[string]string{ + "kubernetes.io/ingress.class": "something", + }, + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "dummy", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, clientSet, t) + err := framework.WaitForIngressInNamespace(clientSet, ns, invalidIngress.Name) + if err != nil { + t.Errorf("error waiting for ingress: %v", err) + } + time.Sleep(1 * time.Second) + + invalidIngressUpdated := invalidIngress.DeepCopy() + invalidIngressUpdated.Spec.Rules[0].Host = "update-dummy" + _ = ensureIngress(invalidIngressUpdated, clientSet, t) + if err != nil { + t.Errorf("error creating ingress: %v", err) + } + // Secret takes a bit to update + time.Sleep(3 * time.Second) + + if atomic.LoadUint64(&add) != 0 { + t.Errorf("expected 0 event of type Create but %v occurred", add) + } + if atomic.LoadUint64(&upd) != 0 { + t.Errorf("expected 
0 event of type Update but %v occurred", upd) + } + if atomic.LoadUint64(&del) != 0 { + t.Errorf("expected 0 event of type Delete but %v occurred", del) + } + }) + + t.Run("should not receive events from secret not referenced from ingress", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + var add uint64 + var upd uint64 + var del uint64 + + go func(ch *channels.RingChannel) { + for { + evt, ok := <-ch.Out() + if !ok { + return + } + + e := evt.(Event) + if e.Obj == nil { + continue + } + switch e.Type { + case CreateEvent: + atomic.AddUint64(&add, 1) + case UpdateEvent: + atomic.AddUint64(&upd, 1) + case DeleteEvent: + atomic.AddUint64(&del, 1) + } + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + secretName := "not-referenced" + _, err := framework.CreateIngressTLSSecret(clientSet, []string{"foo"}, secretName, ns) + if err != nil { + t.Errorf("error creating secret: %v", err) + } + + err = framework.WaitForSecretInNamespace(clientSet, ns, secretName) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + + if atomic.LoadUint64(&add) != 0 { + t.Errorf("expected 0 events of type Create but %v occurred", add) + } + if atomic.LoadUint64(&upd) != 0 { + t.Errorf("expected 0 events of type Update but %v occurred", upd) + } + if atomic.LoadUint64(&del) != 0 { + t.Errorf("expected 0 events of type Delete but %v occurred", del) + } + + err = clientSet.CoreV1().Secrets(ns).Delete(secretName, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("error deleting secret: %v", err) + } + + time.Sleep(1 * time.Second) + + if atomic.LoadUint64(&add) != 0 { + t.Errorf("expected 0 events of type Create but %v occurred", add) + } + if atomic.LoadUint64(&upd) != 0 { + t.Errorf("expected 0 events of type Update but %v occurred", upd) + } + if atomic.LoadUint64(&del) != 0 { + t.Errorf("expected 0 events of type Delete but %v occurred", del) + } + }) + + t.Run("should receive events from secret referenced from ingress", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + var add uint64 + var upd uint64 + var del uint64 + + go func(ch *channels.RingChannel) { + for { + evt, ok := <-ch.Out() + if !ok { + return + } + + e := evt.(Event) + if e.Obj == nil { + continue + } + switch e.Type { + case CreateEvent: + atomic.AddUint64(&add, 1) + case UpdateEvent: + atomic.AddUint64(&upd, 1) + case DeleteEvent: + atomic.AddUint64(&del, 1) + } + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + ingressName := "ingress-with-secret" + secretName := "referenced" + + ing := ensureIngress(&networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: ns, + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + { + SecretName: secretName, + }, + }, + Backend: &networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: 
intstr.FromInt(80), + }, + }, + }, clientSet, t) + defer deleteIngress(ing, clientSet, t) + + err := framework.WaitForIngressInNamespace(clientSet, ns, ingressName) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + + _, err = framework.CreateIngressTLSSecret(clientSet, []string{"foo"}, secretName, ns) + if err != nil { + t.Errorf("error creating secret: %v", err) + } + + err = framework.WaitForSecretInNamespace(clientSet, ns, secretName) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + + // take into account secret sync + time.Sleep(3 * time.Second) + + if atomic.LoadUint64(&add) != 2 { + t.Errorf("expected 2 events of type Create but %v occurred", add) + } + // secret sync triggers a dummy event + if atomic.LoadUint64(&upd) != 1 { + t.Errorf("expected 1 events of type Update but %v occurred", upd) + } + + err = clientSet.CoreV1().Secrets(ns).Delete(secretName, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("error deleting secret: %v", err) + } + + time.Sleep(1 * time.Second) + + if atomic.LoadUint64(&del) != 1 { + t.Errorf("expected 1 events of type Delete but %v occurred", del) + } + + }) + + t.Run("should create an ingress with a secret which does not exist", func(t *testing.T) { + ns := createNamespace(clientSet, t) + defer deleteNamespace(ns, clientSet, t) + createConfigMap(clientSet, ns, t) + + stopCh := make(chan struct{}) + updateCh := channels.NewRingChannel(1024) + + var add uint64 + var upd uint64 + var del uint64 + + go func(ch *channels.RingChannel) { + for { + evt, ok := <-ch.Out() + if !ok { + return + } + + e := evt.(Event) + if e.Obj == nil { + continue + } + switch e.Type { + case CreateEvent: + atomic.AddUint64(&add, 1) + case UpdateEvent: + atomic.AddUint64(&upd, 1) + case DeleteEvent: + atomic.AddUint64(&del, 1) + } + } + }(updateCh) + + fs := newFS(t) + storer := New(true, + ns, + fmt.Sprintf("%v/config", ns), + fmt.Sprintf("%v/tcp", ns), + fmt.Sprintf("%v/udp", ns), + "", + 10*time.Minute, + clientSet, + fs, + updateCh, + false, + pod, + false) + + storer.Run(stopCh) + + name := "ingress-with-secret" + secretHosts := []string{name} + + ing := ensureIngress(&networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: networking.IngressSpec{ + TLS: []networking.IngressTLS{ + { + Hosts: secretHosts, + SecretName: name, + }, + }, + Rules: []networking.IngressRule{ + { + Host: name, + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + Backend: networking.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, clientSet, t) + defer deleteIngress(ing, clientSet, t) + + err := framework.WaitForIngressInNamespace(clientSet, ns, name) + if err != nil { + t.Errorf("error waiting for ingress: %v", err) + } + + // take into account delay caused by: + // * ingress annotations extraction + // * secretIngressMap update + // * secrets sync + time.Sleep(3 * time.Second) + + if atomic.LoadUint64(&add) != 1 { + t.Errorf("expected 1 events of type Create but %v occurred", add) + } + if atomic.LoadUint64(&upd) != 0 { + t.Errorf("expected 0 events of type Update but %v occurred", upd) + } + if atomic.LoadUint64(&del) != 0 { + t.Errorf("expected 0 events of type Delete but %v occurred", del) + } + + _, err = framework.CreateIngressTLSSecret(clientSet, secretHosts, name, ns) + if err != nil { + t.Errorf("error creating secret: %v", err) + } + + 
t.Run("should exists a secret in the local store and filesystem", func(t *testing.T) { + err := framework.WaitForSecretInNamespace(clientSet, ns, name) + if err != nil { + t.Errorf("error waiting for secret: %v", err) + } + + time.Sleep(5 * time.Second) + + pemFile := fmt.Sprintf("%v/%v-%v.pem", file.DefaultSSLDirectory, ns, name) + err = framework.WaitForFileInFS(pemFile, fs) + if err != nil { + t.Errorf("error waiting for file to exist on the file system: %v", err) + } + + secretName := fmt.Sprintf("%v/%v", ns, name) + sslCert, err := storer.GetLocalSSLCert(secretName) + if err != nil { + t.Errorf("error reading local secret %v: %v", secretName, err) + } + + if sslCert == nil { + t.Errorf("expected a secret but none returned") + } + + pemSHA := file.SHA1(pemFile) + if sslCert.PemSHA != pemSHA { + t.Errorf("SHA of secret on disk differs from local secret store (%v != %v)", pemSHA, sslCert.PemSHA) + } + }) + }) + + // test add ingress with secret it doesn't exists and then add secret + // check secret is generated on fs + // check ocsp + // check invalid secret (missing crt) + // check invalid secret (missing key) + // check invalid secret (missing ca) +} + +func createNamespace(clientSet kubernetes.Interface, t *testing.T) string { + t.Helper() + + namespace := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("store-test-%v", time.Now().Unix()), + }, + } + + ns, err := clientSet.CoreV1().Namespaces().Create(namespace) + if err != nil { + t.Errorf("error creating the namespace: %v", err) + } + + return ns.Name +} + +func deleteNamespace(ns string, clientSet kubernetes.Interface, t *testing.T) { + t.Helper() + + err := clientSet.CoreV1().Namespaces().Delete(ns, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("error deleting the namespace: %v", err) + } +} + +func createConfigMap(clientSet kubernetes.Interface, ns string, t *testing.T) string { + t.Helper() + + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + }, + } + + cm, err := clientSet.CoreV1().ConfigMaps(ns).Create(configMap) + if err != nil { + t.Errorf("error creating the configuration map: %v", err) + } + + return cm.Name +} + +func ensureIngress(ingress *networking.Ingress, clientSet kubernetes.Interface, t *testing.T) *networking.Ingress { + t.Helper() + ing, err := clientSet.NetworkingV1beta1().Ingresses(ingress.Namespace).Update(ingress) + + if err != nil { + if k8sErrors.IsNotFound(err) { + t.Logf("Ingress %v not found, creating", ingress) + + ing, err = clientSet.NetworkingV1beta1().Ingresses(ingress.Namespace).Create(ingress) + if err != nil { + t.Fatalf("error creating ingress %+v: %v", ingress, err) + } + + t.Logf("Ingress %+v created", ingress) + return ing + } + + t.Fatalf("error updating ingress %+v: %v", ingress, err) + } + + return ing +} + +func deleteIngress(ingress *networking.Ingress, clientSet kubernetes.Interface, t *testing.T) { + t.Helper() + err := clientSet.NetworkingV1beta1().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{}) + + if err != nil { + t.Errorf("failed to delete ingress %+v: %v", ingress, err) + } + + t.Logf("Ingress %+v deleted", ingress) +} + +func newFS(t *testing.T) file.Filesystem { + fs, err := file.NewFakeFS() + if err != nil { + t.Fatalf("error creating filesystem: %v", err) + } + return fs +} + +// newStore creates a new mock object store for tests which do not require the +// use of Informers. 
+func newStore(t *testing.T) *k8sStore { + fs, err := file.NewFakeFS() + if err != nil { + t.Fatalf("error: %v", err) + } + + pod := &k8s.PodInfo{ + Name: "ingress-1", + Namespace: v1.NamespaceDefault, + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + } + + return &k8sStore{ + listers: &Lister{ + // add more listers if needed + Ingress: IngressLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, + IngressWithAnnotation: IngressWithAnnotationsLister{cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)}, + Pod: PodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, + }, + sslStore: NewSSLCertTracker(), + filesystem: fs, + updateCh: channels.NewRingChannel(10), + syncSecretMu: new(sync.Mutex), + backendConfigMu: new(sync.RWMutex), + secretIngressMap: NewObjectRefMap(), + pod: pod, + } +} + +func TestUpdateSecretIngressMap(t *testing.T) { + s := newStore(t) + + ingTpl := &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "testns", + }, + } + s.listers.Ingress.Add(ingTpl) + + t.Run("with TLS secret", func(t *testing.T) { + ing := ingTpl.DeepCopy() + ing.Spec = networking.IngressSpec{ + TLS: []networking.IngressTLS{{SecretName: "tls"}}, + } + s.listers.Ingress.Update(ing) + s.updateSecretIngressMap(ing) + + if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("testns/tls")) { + t.Errorf("Expected \"testns/tls\" to be the only referenced Secret (got %d)", l) + } + }) + + t.Run("with annotation in simple name format", func(t *testing.T) { + ing := ingTpl.DeepCopy() + ing.ObjectMeta.SetAnnotations(map[string]string{ + parser.GetAnnotationWithPrefix("auth-secret"): "auth", + }) + s.listers.Ingress.Update(ing) + s.updateSecretIngressMap(ing) + + if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("testns/auth")) { + t.Errorf("Expected \"testns/auth\" to be the only referenced Secret (got %d)", l) + } + }) + + t.Run("with annotation in namespace/name format", func(t *testing.T) { + ing := ingTpl.DeepCopy() + ing.ObjectMeta.SetAnnotations(map[string]string{ + parser.GetAnnotationWithPrefix("auth-secret"): "otherns/auth", + }) + s.listers.Ingress.Update(ing) + s.updateSecretIngressMap(ing) + + if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("otherns/auth")) { + t.Errorf("Expected \"otherns/auth\" to be the only referenced Secret (got %d)", l) + } + }) + + t.Run("with annotation in invalid format", func(t *testing.T) { + ing := ingTpl.DeepCopy() + ing.ObjectMeta.SetAnnotations(map[string]string{ + parser.GetAnnotationWithPrefix("auth-secret"): "ns/name/garbage", + }) + s.listers.Ingress.Update(ing) + s.updateSecretIngressMap(ing) + + if l := s.secretIngressMap.Len(); l != 0 { + t.Errorf("Expected 0 referenced Secret (got %d)", l) + } + }) +} + +func TestListIngresses(t *testing.T) { + s := newStore(t) + + ingressToIgnore := &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + Namespace: "testns", + Annotations: map[string]string{ + "kubernetes.io/ingress.class": "something", + }, + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Spec: networking.IngressSpec{ + Backend: &networking.IngressBackend{ + ServiceName: "demo", + ServicePort: intstr.FromInt(80), + }, + }, + }, + } + s.listers.IngressWithAnnotation.Add(ingressToIgnore) + + ingressWithoutPath := &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-3", + Namespace: "testns", + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Spec: 
networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Backend: networking.IngressBackend{ + ServiceName: "demo", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + s.listers.IngressWithAnnotation.Add(ingressWithoutPath) + + ingressWithNginxClass := &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-4", + Namespace: "testns", + Annotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + }, + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: "foo.bar", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/demo", + Backend: networking.IngressBackend{ + ServiceName: "demo", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + s.listers.IngressWithAnnotation.Add(ingressWithNginxClass) + + ingresses := s.ListIngresses(nil) + + if s := len(ingresses); s != 3 { + t.Errorf("Expected 3 Ingresses but got %v", s) + } + + if ingresses[0].Name != "test-2" { + t.Errorf("Expected Ingress test-2 but got %v", ingresses[0].Name) + } + + if ingresses[1].Name != "test-3" { + t.Errorf("Expected Ingress test-3 but got %v", ingresses[1].Name) + } + + if ingresses[2].Name != "test-4" { + t.Errorf("Expected Ingress test-4 but got %v", ingresses[2].Name) + } +} + +func TestWriteSSLSessionTicketKey(t *testing.T) { + tests := []string{ + "9DyULjtYWz520d1rnTLbc4BOmN2nLAVfd3MES/P3IxWuwXkz9Fby0lnOZZUdNEMV", + "9SvN1C9AB5DvNde5fMKoJwAwICpqdjiMyxR+cv6NpAWv22rFd3gKt4wMyGxCm7l9Wh6BQPG0+csyBZSHHr2NOWj52Wx8xCegXf4NsSMBUqA=", + } + + for _, test := range tests { + s := newStore(t) + + cmap := &v1.ConfigMap{ + Data: map[string]string{ + "ssl-session-ticket-key": test, + }, + } + + f, err := ioutil.TempFile("", "ssl-session-ticket-test") + if err != nil { + t.Fatal(err) + } + + s.writeSSLSessionTicketKey(cmap, f.Name()) + + content, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal(err) + } + encodedContent := base64.StdEncoding.EncodeToString(content) + + f.Close() + + if test != encodedContent { + t.Fatalf("expected %v but returned %s", test, encodedContent) + } + } +} + +func TestGetRunningControllerPodsCount(t *testing.T) { + os.Setenv("POD_NAMESPACE", "testns") + os.Setenv("POD_NAME", "ingress-1") + + s := newStore(t) + s.pod = &k8s.PodInfo{ + Name: "ingress-1", + Namespace: "testns", + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-1", + Namespace: "testns", + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + } + s.listers.Pod.Add(pod) + + pod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-2", + Namespace: "testns", + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + } + s.listers.Pod.Add(pod) + + pod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-3", + Namespace: "testns", + Labels: map[string]string{ + "pod-template-hash": "1234", + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + }, + } + s.listers.Pod.Add(pod) + + podsCount := s.GetRunningControllerPodsCount() + if podsCount != 2 { + 
t.Errorf("Expected 1 controller Pods but got %v", s) + } +} + +func TestIngressConversion(t *testing.T) { + ing := &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "old-ingress", + Namespace: "demo", + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: "foo.bar", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Backend: extensions.IngressBackend{ + ServiceName: "demo", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + } + + new, err := fromExtensions(ing) + if err != nil { + t.Fatalf("unexpected error converting ingress: %v", err) + } + + m1, err := new.Marshal() + if err != nil { + t.Fatalf("unexpected error marshalling Ingress: %v", err) + } + + m2, err := ing.Marshal() + if err != nil { + t.Fatalf("unexpected error marshalling Ingress: %v", err) + } + + if bytes.Compare(m1, m2) != 0 { + t.Fatalf("Expected marshalling of types should be equal") + } +} diff --git a/internal/ingress/controller/tcp.go b/internal/ingress/controller/tcp.go index efcc830771..ea029718e4 100644 --- a/internal/ingress/controller/tcp.go +++ b/internal/ingress/controller/tcp.go @@ -21,11 +21,12 @@ import ( "io" "net" - "github.com/golang/glog" + "k8s.io/klog" "github.com/paultag/sniff/parser" ) +// TCPServer describes a server that works in passthrough mode. type TCPServer struct { Hostname string IP string @@ -33,11 +34,13 @@ type TCPServer struct { ProxyProtocol bool } +// TCPProxy describes the passthrough servers and a default as catch all. type TCPProxy struct { ServerList []*TCPServer Default *TCPServer } +// Get returns the TCPServer to use for a given host. func (p *TCPProxy) Get(host string) *TCPServer { if p.ServerList == nil { return p.Default @@ -52,25 +55,27 @@ func (p *TCPProxy) Get(host string) *TCPServer { return p.Default } +// Handle reads enough information from the connection to extract the hostname +// and open a connection to the passthrough server. 
func (p *TCPProxy) Handle(conn net.Conn) { defer conn.Close() data := make([]byte, 4096) length, err := conn.Read(data) if err != nil { - glog.V(4).Infof("error reading the first 4k of the connection: %s", err) + klog.V(4).Infof("Error reading the first 4k of the connection: %v", err) return } proxy := p.Default hostname, err := parser.GetHostname(data[:]) if err == nil { - glog.V(4).Infof("parsed hostname from TLS Client Hello: %s", hostname) + klog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname) proxy = p.Get(hostname) } if proxy == nil { - glog.V(4).Infof("there is no configured proxy for SSL connections") + klog.V(4).Info("There is no configured proxy for SSL connections.") return } @@ -81,7 +86,7 @@ func (p *TCPProxy) Handle(conn net.Conn) { defer clientConn.Close() if proxy.ProxyProtocol { - //Write out the proxy-protocol header + // write out the Proxy Protocol header localAddr := conn.LocalAddr().(*net.TCPAddr) remoteAddr := conn.RemoteAddr().(*net.TCPAddr) protocol := "UNKNOWN" @@ -91,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) { protocol = "TCP6" } proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port) - glog.V(4).Infof("Writing proxy protocol header - %s", proxyProtocolHeader) + klog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader) _, err = fmt.Fprintf(clientConn, proxyProtocolHeader) } if err != nil { - glog.Errorf("unexpected error writing proxy-protocol header: %s", err) + klog.Errorf("Error writing Proxy Protocol header: %v", err) clientConn.Close() } else { _, err = clientConn.Write(data[:length]) if err != nil { - glog.Errorf("unexpected error writing first 4k of proxy data: %s", err) + klog.Errorf("Error writing the first 4k of proxy data: %v", err) clientConn.Close() } } diff --git a/internal/ingress/controller/template/configmap.go b/internal/ingress/controller/template/configmap.go index 5a65364e1a..6eaafc42cc 100644 --- a/internal/ingress/controller/template/configmap.go +++ b/internal/ingress/controller/template/configmap.go @@ -21,27 +21,46 @@ import ( "net" "strconv" "strings" + "time" - "github.com/golang/glog" + "k8s.io/klog" + "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress-nginx/internal/ingress/annotations/authreq" "k8s.io/ingress-nginx/internal/ingress/controller/config" ing_net "k8s.io/ingress-nginx/internal/net" + "k8s.io/ingress-nginx/internal/runtime" ) const ( - customHTTPErrors = "custom-http-errors" - skipAccessLogUrls = "skip-access-log-urls" - whitelistSourceRange = "whitelist-source-range" - proxyRealIPCIDR = "proxy-real-ip-cidr" - bindAddress = "bind-address" - httpRedirectCode = "http-redirect-code" - proxyStreamResponses = "proxy-stream-responses" + customHTTPErrors = "custom-http-errors" + skipAccessLogUrls = "skip-access-log-urls" + whitelistSourceRange = "whitelist-source-range" + proxyRealIPCIDR = "proxy-real-ip-cidr" + bindAddress = "bind-address" + httpRedirectCode = "http-redirect-code" + blockCIDRs = "block-cidrs" + blockUserAgents = "block-user-agents" + blockReferers = "block-referers" + proxyStreamResponses = "proxy-stream-responses" + hideHeaders = "hide-headers" + nginxStatusIpv4Whitelist = "nginx-status-ipv4-whitelist" + nginxStatusIpv6Whitelist = "nginx-status-ipv6-whitelist" + proxyHeaderTimeout = "proxy-protocol-header-timeout" + workerProcesses = "worker-processes" + globalAuthURL = 
"global-auth-url" + globalAuthMethod = "global-auth-method" + globalAuthSignin = "global-auth-signin" + globalAuthResponseHeaders = "global-auth-response-headers" + globalAuthRequestRedirect = "global-auth-request-redirect" + globalAuthSnippet = "global-auth-snippet" ) var ( - validRedirectCodes = []int{301, 302, 307, 308} + validRedirectCodes = sets.NewInt([]int{301, 302, 307, 308}...) ) // ReadConfig obtains the configuration defined by the user merged with the defaults. @@ -52,38 +71,49 @@ func ReadConfig(src map[string]string) config.Configuration { conf[k] = v } + to := config.NewDefault() errors := make([]int, 0) skipUrls := make([]string, 0) - whitelist := make([]string, 0) - proxylist := make([]string, 0) + whiteList := make([]string, 0) + proxyList := make([]string, 0) + hideHeadersList := make([]string, 0) + bindAddressIpv4List := make([]string, 0) bindAddressIpv6List := make([]string, 0) - redirectCode := 308 + + blockCIDRList := make([]string, 0) + blockUserAgentList := make([]string, 0) + blockRefererList := make([]string, 0) + responseHeaders := make([]string, 0) if val, ok := conf[customHTTPErrors]; ok { delete(conf, customHTTPErrors) for _, i := range strings.Split(val, ",") { j, err := strconv.Atoi(i) if err != nil { - glog.Warningf("%v is not a valid http code: %v", i, err) + klog.Warningf("%v is not a valid http code: %v", i, err) } else { errors = append(errors, j) } } } + if val, ok := conf[hideHeaders]; ok { + delete(conf, hideHeaders) + hideHeadersList = strings.Split(val, ",") + } if val, ok := conf[skipAccessLogUrls]; ok { delete(conf, skipAccessLogUrls) skipUrls = strings.Split(val, ",") } if val, ok := conf[whitelistSourceRange]; ok { delete(conf, whitelistSourceRange) - whitelist = append(whitelist, strings.Split(val, ",")...) + whiteList = append(whiteList, strings.Split(val, ",")...) } if val, ok := conf[proxyRealIPCIDR]; ok { delete(conf, proxyRealIPCIDR) - proxylist = append(proxylist, strings.Split(val, ",")...) + proxyList = append(proxyList, strings.Split(val, ",")...) } else { - proxylist = append(proxylist, "0.0.0.0/0") + proxyList = append(proxyList, "0.0.0.0/0") } if val, ok := conf[bindAddress]; ok { delete(conf, bindAddress) @@ -96,45 +126,166 @@ func ReadConfig(src map[string]string) config.Configuration { bindAddressIpv4List = append(bindAddressIpv4List, fmt.Sprintf("%v", ns)) } } else { - glog.Warningf("%v is not a valid textual representation of an IP address", i) + klog.Warningf("%v is not a valid textual representation of an IP address", i) } } } + if val, ok := conf[blockCIDRs]; ok { + delete(conf, blockCIDRs) + blockCIDRList = strings.Split(val, ",") + } + if val, ok := conf[blockUserAgents]; ok { + delete(conf, blockUserAgents) + blockUserAgentList = strings.Split(val, ",") + } + if val, ok := conf[blockReferers]; ok { + delete(conf, blockReferers) + blockRefererList = strings.Split(val, ",") + } + if val, ok := conf[httpRedirectCode]; ok { delete(conf, httpRedirectCode) j, err := strconv.Atoi(val) if err != nil { - glog.Warningf("%v is not a valid HTTP code: %v", val, err) + klog.Warningf("%v is not a valid HTTP code: %v", val, err) } else { - if intInSlice(j, validRedirectCodes) { - redirectCode = j + if validRedirectCodes.Has(j) { + to.HTTPRedirectCode = j } else { - glog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val) + klog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val) } } } + // Verify that the configured global external authorization URL is parsable as URL. 
if not, set the default value + if val, ok := conf[globalAuthURL]; ok { + delete(conf, globalAuthURL) + + authURL, message := authreq.ParseStringToURL(val) + if authURL == nil { + klog.Warningf("Global auth location denied - %v.", message) + } else { + to.GlobalExternalAuth.URL = val + to.GlobalExternalAuth.Host = authURL.Hostname() + } + } + + // Verify that the configured global external authorization method is a valid HTTP method. if not, set the default value + if val, ok := conf[globalAuthMethod]; ok { + delete(conf, globalAuthMethod) + + if len(val) != 0 && !authreq.ValidMethod(val) { + klog.Warningf("Global auth location denied - %v.", "invalid HTTP method") + } else { + to.GlobalExternalAuth.Method = val + } + } + + // Verify that the configured global external authorization error page is set and valid. if not, set the default value + if val, ok := conf[globalAuthSignin]; ok { + delete(conf, globalAuthSignin) + + signinURL, _ := authreq.ParseStringToURL(val) + if signinURL == nil { + klog.Warningf("Global auth location denied - %v.", "global-auth-signin setting is undefined and will not be set") + } else { + to.GlobalExternalAuth.SigninURL = val + } + } + + // Verify that the configured global external authorization response headers are valid. if not, set the default value + if val, ok := conf[globalAuthResponseHeaders]; ok { + delete(conf, globalAuthResponseHeaders) + + if len(val) != 0 { + harr := strings.Split(val, ",") + for _, header := range harr { + header = strings.TrimSpace(header) + if len(header) > 0 { + if !authreq.ValidHeader(header) { + klog.Warningf("Global auth location denied - %v.", "invalid headers list") + } else { + responseHeaders = append(responseHeaders, header) + } + } + } + } + to.GlobalExternalAuth.ResponseHeaders = responseHeaders + } + + if val, ok := conf[globalAuthRequestRedirect]; ok { + delete(conf, globalAuthRequestRedirect) + + to.GlobalExternalAuth.RequestRedirect = val + } + + if val, ok := conf[globalAuthSnippet]; ok { + delete(conf, globalAuthSnippet) + + to.GlobalExternalAuth.AuthSnippet = val + } + + // Verify that the configured timeout is parsable as a duration. if not, set the default value + if val, ok := conf[proxyHeaderTimeout]; ok { + delete(conf, proxyHeaderTimeout) + duration, err := time.ParseDuration(val) + if err != nil { + klog.Warningf("proxy-protocol-header-timeout of %v encountered an error while being parsed %v. Switching to use default value instead.", val, err) + } else { + to.ProxyProtocolHeaderTimeout = duration + } + } + streamResponses := 1 if val, ok := conf[proxyStreamResponses]; ok { delete(conf, proxyStreamResponses) j, err := strconv.Atoi(val) if err != nil { - glog.Warningf("%v is not a valid number: %v", val, err) + klog.Warningf("%v is not a valid number: %v", val, err) } else { streamResponses = j } } - to := config.NewDefault() + // Nginx Status whitelist + if val, ok := conf[nginxStatusIpv4Whitelist]; ok { + whitelist := make([]string, 0) + whitelist = append(whitelist, strings.Split(val, ",")...) + to.NginxStatusIpv4Whitelist = whitelist + + delete(conf, nginxStatusIpv4Whitelist) + } + if val, ok := conf[nginxStatusIpv6Whitelist]; ok { + whitelist := make([]string, 0) + whitelist = append(whitelist, strings.Split(val, ",")...) 
+ to.NginxStatusIpv6Whitelist = whitelist + + delete(conf, nginxStatusIpv6Whitelist) + } + + if val, ok := conf[workerProcesses]; ok { + to.WorkerProcesses = val + + if val == "auto" { + to.WorkerProcesses = strconv.Itoa(runtime.NumCPU()) + } + + delete(conf, workerProcesses) + } + to.CustomHTTPErrors = filterErrors(errors) to.SkipAccessLogURLs = skipUrls - to.WhitelistSourceRange = whitelist - to.ProxyRealIPCIDR = proxylist + to.WhitelistSourceRange = whiteList + to.ProxyRealIPCIDR = proxyList to.BindAddressIpv4 = bindAddressIpv4List to.BindAddressIpv6 = bindAddressIpv6List - to.HTTPRedirectCode = redirectCode + to.BlockCIDRs = blockCIDRList + to.BlockUserAgents = blockUserAgentList + to.BlockReferers = blockRefererList + to.HideHeaders = hideHeadersList to.ProxyStreamResponses = streamResponses + to.DisableIpv6DNS = !ing_net.IsIPv6Enabled() config := &mapstructure.DecoderConfig{ Metadata: nil, @@ -145,13 +296,22 @@ func ReadConfig(src map[string]string) config.Configuration { decoder, err := mapstructure.NewDecoder(config) if err != nil { - glog.Warningf("unexpected error merging defaults: %v", err) + klog.Warningf("unexpected error merging defaults: %v", err) } err = decoder.Decode(conf) if err != nil { - glog.Warningf("unexpected error merging defaults: %v", err) + klog.Warningf("unexpected error merging defaults: %v", err) } + hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + klog.Warningf("unexpected error obtaining hash: %v", err) + } + + to.Checksum = fmt.Sprintf("%v", hash) + return to } @@ -161,18 +321,9 @@ func filterErrors(codes []int) []int { if code > 299 && code < 600 { fa = append(fa, code) } else { - glog.Warningf("error code %v is not valid for custom error pages", code) + klog.Warningf("error code %v is not valid for custom error pages", code) } } return fa } - -func intInSlice(i int, list []int) bool { - for _, v := range list { - if v == i { - return true - } - } - return false -} diff --git a/internal/ingress/controller/template/configmap_test.go b/internal/ingress/controller/template/configmap_test.go index 1df9eae3f7..b0afbd8ce2 100644 --- a/internal/ingress/controller/template/configmap_test.go +++ b/internal/ingress/controller/template/configmap_test.go @@ -17,9 +17,13 @@ limitations under the License. package template import ( + "fmt" + "reflect" "testing" + "time" "github.com/kylelemons/godebug/pretty" + "github.com/mitchellh/hashstructure" "k8s.io/ingress-nginx/internal/ingress/controller/config" ) @@ -31,26 +35,49 @@ func TestFilterErrors(t *testing.T) { } } +func TestProxyTimeoutParsing(t *testing.T) { + testCases := map[string]struct { + input string + expect time.Duration // duration in seconds + }{ + "valid duration": {"35s", time.Duration(35) * time.Second}, + "invalid duration": {"3zxs", time.Duration(5) * time.Second}, + } + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"proxy-protocol-header-timeout": tc.input}) + if cfg.ProxyProtocolHeaderTimeout.Seconds() != tc.expect.Seconds() { + t.Errorf("Testing %v. 
Expected %v seconds but got %v seconds", n, tc.expect, cfg.ProxyProtocolHeaderTimeout) + } + } +} + func TestMergeConfigMapToStruct(t *testing.T) { conf := map[string]string{ - "custom-http-errors": "300,400,demo", - "proxy-read-timeout": "1", - "proxy-send-timeout": "2", - "skip-access-log-urls": "/log,/demo,/test", - "use-proxy-protocol": "true", - "disable-access-log": "true", - "access-log-path": "/var/log/test/access.log", - "error-log-path": "/var/log/test/error.log", - "use-gzip": "true", - "enable-dynamic-tls-records": "false", - "gzip-types": "text/html", - "proxy-real-ip-cidr": "1.1.1.1/8,2.2.2.2/24", - "bind-address": "1.1.1.1,2.2.2.2,3.3.3,2001:db8:a0b:12f0::1,3731:54:65fe:2::a7,33:33:33::33::33", - "worker-shutdown-timeout": "99s", + "custom-http-errors": "300,400,demo", + "proxy-read-timeout": "1", + "proxy-send-timeout": "2", + "skip-access-log-urls": "/log,/demo,/test", + "use-proxy-protocol": "true", + "disable-access-log": "true", + "access-log-params": "buffer=4k gzip", + "access-log-path": "/var/log/test/access.log", + "error-log-path": "/var/log/test/error.log", + "use-gzip": "true", + "enable-dynamic-tls-records": "false", + "gzip-level": "9", + "gzip-types": "text/html", + "proxy-real-ip-cidr": "1.1.1.1/8,2.2.2.2/24", + "bind-address": "1.1.1.1,2.2.2.2,3.3.3,2001:db8:a0b:12f0::1,3731:54:65fe:2::a7,33:33:33::33::33", + "worker-shutdown-timeout": "99s", + "nginx-status-ipv4-whitelist": "127.0.0.1,10.0.0.0/24", + "nginx-status-ipv6-whitelist": "::1,2001::/16", + "proxy-add-original-uri-header": "false", + "disable-ipv6-dns": "true", } def := config.NewDefault() def.CustomHTTPErrors = []int{300, 400} def.DisableAccessLog = true + def.AccessLogParams = "buffer=4k gzip" def.AccessLogPath = "/var/log/test/access.log" def.ErrorLogPath = "/var/log/test/error.log" def.SkipAccessLogURLs = []string{"/log", "/demo", "/test"} @@ -58,27 +85,77 @@ func TestMergeConfigMapToStruct(t *testing.T) { def.ProxySendTimeout = 2 def.EnableDynamicTLSRecords = false def.UseProxyProtocol = true + def.GzipLevel = 9 def.GzipTypes = "text/html" def.ProxyRealIPCIDR = []string{"1.1.1.1/8", "2.2.2.2/24"} def.BindAddressIpv4 = []string{"1.1.1.1", "2.2.2.2"} def.BindAddressIpv6 = []string{"[2001:db8:a0b:12f0::1]", "[3731:54:65fe:2::a7]"} def.WorkerShutdownTimeout = "99s" + def.NginxStatusIpv4Whitelist = []string{"127.0.0.1", "10.0.0.0/24"} + def.NginxStatusIpv6Whitelist = []string{"::1", "2001::/16"} + def.ProxyAddOriginalURIHeader = false + + def.DisableIpv6DNS = true + + hash, err := hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) to := ReadConfig(conf) if diff := pretty.Compare(to, def); diff != "" { t.Errorf("unexpected diff: (-got +want)\n%s", diff) } + to = ReadConfig(conf) + def.BindAddressIpv4 = []string{} + def.BindAddressIpv6 = []string{} + + if !reflect.DeepEqual(to.BindAddressIpv4, []string{"1.1.1.1", "2.2.2.2"}) { + t.Errorf("unexpected bindAddressIpv4") + } + + if !reflect.DeepEqual(to.BindAddressIpv6, []string{"[2001:db8:a0b:12f0::1]", "[3731:54:65fe:2::a7]"}) { + t.Logf("%v", to.BindAddressIpv6) + t.Errorf("unexpected bindAddressIpv6") + } + def = config.NewDefault() - to = ReadConfig(map[string]string{}) + def.DisableIpv6DNS = true + + hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) + + 
to = ReadConfig(map[string]string{ + "disable-ipv6-dns": "true", + }) if diff := pretty.Compare(to, def); diff != "" { t.Errorf("unexpected diff: (-got +want)\n%s", diff) } def = config.NewDefault() def.WhitelistSourceRange = []string{"1.1.1.1/32"} + def.DisableIpv6DNS = true + + hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) + to = ReadConfig(map[string]string{ "whitelist-source-range": "1.1.1.1/32", + "disable-ipv6-dns": "true", }) if diff := pretty.Compare(to, def); diff != "" { @@ -86,10 +163,120 @@ func TestMergeConfigMapToStruct(t *testing.T) { } } -func TestDefaultLoadBalance(t *testing.T) { - conf := map[string]string{} - to := ReadConfig(conf) - if to.LoadBalanceAlgorithm != "least_conn" { - t.Errorf("default load balance algorithm wrong") +func TestGlobalExternalAuthURLParsing(t *testing.T) { + errorURL := "" + validURL := "http://bar.foo.com/external-auth" + + testCases := map[string]struct { + url string + expect string + }{ + "no scheme": {"bar", errorURL}, + "invalid host": {"http://", errorURL}, + "invalid host (multiple dots)": {"http://foo..bar.com", errorURL}, + "valid URL": {validURL, validURL}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-url": tc.url}) + if cfg.GlobalExternalAuth.URL != tc.expect { + t.Errorf("Testing %v. Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.URL) + } + } +} + +func TestGlobalExternalAuthMethodParsing(t *testing.T) { + testCases := map[string]struct { + method string + expect string + }{ + "invalid method": {"FOO", ""}, + "valid method": {"POST", "POST"}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-method": tc.method}) + if cfg.GlobalExternalAuth.Method != tc.expect { + t.Errorf("Testing %v. Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.Method) + } + } +} + +func TestGlobalExternalAuthSigninParsing(t *testing.T) { + errorURL := "" + validURL := "http://bar.foo.com/auth-error-page" + + testCases := map[string]struct { + signin string + expect string + }{ + "no scheme": {"bar", errorURL}, + "invalid host": {"http://", errorURL}, + "invalid host (multiple dots)": {"http://foo..bar.com", errorURL}, + "valid URL": {validURL, validURL}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-signin": tc.signin}) + if cfg.GlobalExternalAuth.SigninURL != tc.expect { + t.Errorf("Testing %v. Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.SigninURL) + } + } +} + +func TestGlobalExternalAuthResponseHeadersParsing(t *testing.T) { + testCases := map[string]struct { + headers string + expect []string + }{ + "single header": {"h1", []string{"h1"}}, + "nothing": {"", []string{}}, + "spaces": {" ", []string{}}, + "two headers": {"1,2", []string{"1", "2"}}, + "two headers and empty entries": {",1,,2,", []string{"1", "2"}}, + "header with spaces": {"1 2", []string{}}, + "header with other bad symbols": {"1+2", []string{}}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-response-headers": tc.headers}) + + if !reflect.DeepEqual(cfg.GlobalExternalAuth.ResponseHeaders, tc.expect) { + t.Errorf("Testing %v. 
Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.ResponseHeaders) + } + } +} + +func TestGlobalExternalAuthRequestRedirectParsing(t *testing.T) { + testCases := map[string]struct { + requestRedirect string + expect string + }{ + "empty": {"", ""}, + "valid request redirect": {"http://foo.com/redirect-me", "http://foo.com/redirect-me"}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-request-redirect": tc.requestRedirect}) + if cfg.GlobalExternalAuth.RequestRedirect != tc.expect { + t.Errorf("Testing %v. Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.RequestRedirect) + } + } +} + +func TestGlobalExternalAuthSnippetParsing(t *testing.T) { + testCases := map[string]struct { + authSnippet string + expect string + }{ + "empty": {"", ""}, + "auth snippet": {"proxy_set_header My-Custom-Header 42;", "proxy_set_header My-Custom-Header 42;"}, + } + + for n, tc := range testCases { + cfg := ReadConfig(map[string]string{"global-auth-snippet": tc.authSnippet}) + if cfg.GlobalExternalAuth.AuthSnippet != tc.expect { + t.Errorf("Testing %v. Expected \"%v\" but \"%v\" was returned", n, tc.expect, cfg.GlobalExternalAuth.AuthSnippet) + } } } diff --git a/internal/ingress/controller/template/template.go b/internal/ingress/controller/template/template.go index 08ff249927..4a1d4b1f3a 100644 --- a/internal/ingress/controller/template/template.go +++ b/internal/ingress/controller/template/template.go @@ -17,29 +17,32 @@ limitations under the License. package template import ( + "bytes" "encoding/base64" "encoding/json" "fmt" + "math/rand" "net" "net/url" "os" "os/exec" - "strconv" + "reflect" + "regexp" + "sort" "strings" text_template "text/template" + "time" - "github.com/golang/glog" "github.com/pkg/errors" - "github.com/pborman/uuid" - - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/ingress/annotations/influxdb" "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/controller/config" ing_net "k8s.io/ingress-nginx/internal/net" + "k8s.io/klog" ) const ( @@ -48,6 +51,11 @@ const ( defBufferSize = 65535 ) +// TemplateWriter is the interface to render a template +type TemplateWriter interface { + Write(conf config.TemplateConfig) ([]byte, error) +} + // Template ... 
type Template struct { tmpl *text_template.Template @@ -83,12 +91,12 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) { outCmdBuf := t.bp.Get() defer t.bp.Put(outCmdBuf) - if glog.V(3) { + if klog.V(3) { b, err := json.Marshal(conf) if err != nil { - glog.Errorf("unexpected error: %v", err) + klog.Errorf("unexpected error: %v", err) } - glog.Infof("NGINX configuration: %v", string(b)) + klog.Infof("NGINX configuration: %v", string(b)) } err := t.tmpl.Execute(tmplBuf, conf) @@ -102,7 +110,7 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) { cmd.Stdin = tmplBuf cmd.Stdout = outCmdBuf if err := cmd.Run(); err != nil { - glog.Warningf("unexpected error cleaning template: %v", err) + klog.Warningf("unexpected error cleaning template: %v", err) return tmplBuf.Bytes(), nil } @@ -118,36 +126,68 @@ var ( } return true }, - "buildLocation": buildLocation, - "buildAuthLocation": buildAuthLocation, - "buildAuthResponseHeaders": buildAuthResponseHeaders, - "buildProxyPass": buildProxyPass, - "filterRateLimits": filterRateLimits, - "buildRateLimitZones": buildRateLimitZones, - "buildRateLimit": buildRateLimit, - "buildResolvers": buildResolvers, - "buildUpstreamName": buildUpstreamName, - "isLocationAllowed": isLocationAllowed, - "buildLogFormatUpstream": buildLogFormatUpstream, - "buildDenyVariable": buildDenyVariable, - "getenv": os.Getenv, - "contains": strings.Contains, - "hasPrefix": strings.HasPrefix, - "hasSuffix": strings.HasSuffix, - "toUpper": strings.ToUpper, - "toLower": strings.ToLower, - "formatIP": formatIP, - "buildNextUpstream": buildNextUpstream, - "getIngressInformation": getIngressInformation, + "escapeLiteralDollar": escapeLiteralDollar, + "shouldConfigureLuaRestyWAF": shouldConfigureLuaRestyWAF, + "buildLuaSharedDictionaries": buildLuaSharedDictionaries, + "buildLocation": buildLocation, + "buildAuthLocation": buildAuthLocation, + "shouldApplyGlobalAuth": shouldApplyGlobalAuth, + "buildAuthResponseHeaders": buildAuthResponseHeaders, + "buildProxyPass": buildProxyPass, + "filterRateLimits": filterRateLimits, + "buildRateLimitZones": buildRateLimitZones, + "buildRateLimit": buildRateLimit, + "buildResolversForLua": buildResolversForLua, + "configForLua": configForLua, + "locationConfigForLua": locationConfigForLua, + "buildResolvers": buildResolvers, + "buildUpstreamName": buildUpstreamName, + "isLocationInLocationList": isLocationInLocationList, + "isLocationAllowed": isLocationAllowed, + "buildLogFormatUpstream": buildLogFormatUpstream, + "buildDenyVariable": buildDenyVariable, + "getenv": os.Getenv, + "contains": strings.Contains, + "hasPrefix": strings.HasPrefix, + "hasSuffix": strings.HasSuffix, + "trimSpace": strings.TrimSpace, + "toUpper": strings.ToUpper, + "toLower": strings.ToLower, + "formatIP": formatIP, + "buildNextUpstream": buildNextUpstream, + "getIngressInformation": getIngressInformation, "serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} { return struct{ First, Second interface{} }{all, server} }, - "isValidClientBodyBufferSize": isValidClientBodyBufferSize, - "buildForwardedFor": buildForwardedFor, - "buildAuthSignURL": buildAuthSignURL, + "isValidByteSize": isValidByteSize, + "buildForwardedFor": buildForwardedFor, + "buildAuthSignURL": buildAuthSignURL, + "buildOpentracing": buildOpentracing, + "proxySetHeader": proxySetHeader, + "buildInfluxDB": buildInfluxDB, + "enforceRegexModifier": enforceRegexModifier, + "stripLocationModifer": stripLocationModifer, + "buildCustomErrorDeps": 
buildCustomErrorDeps, + "opentracingPropagateContext": opentracingPropagateContext, + "buildCustomErrorLocationsPerServer": buildCustomErrorLocationsPerServer, + "shouldLoadModSecurityModule": shouldLoadModSecurityModule, } ) +// escapeLiteralDollar will replace the $ character with ${literal_dollar} +// which is made to work via the following configuration in the http section of +// the template: +// geo $literal_dollar { +// default "$"; +// } +func escapeLiteralDollar(input interface{}) string { + inputStr, ok := input.(string) + if !ok { + return "" + } + return strings.Replace(inputStr, `$`, `${literal_dollar}`, -1) +} + // formatIP will wrap IPv6 addresses in [] and return IPv4 addresses // without modification. If the input cannot be parsed as an IP address // it is returned without modification. @@ -162,12 +202,132 @@ func formatIP(input string) string { return fmt.Sprintf("[%s]", input) } +func shouldConfigureLuaRestyWAF(disableLuaRestyWAF bool, mode string) bool { + if !disableLuaRestyWAF && len(mode) > 0 { + return true + } + + return false +} + +func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { + servers, ok := s.([]*ingress.Server) + if !ok { + klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s) + return "" + } + + out := []string{ + "lua_shared_dict configuration_data 15M", + "lua_shared_dict certificate_data 16M", + } + + if !disableLuaRestyWAF { + luaRestyWAFEnabled := func() bool { + for _, server := range servers { + for _, location := range server.Locations { + if len(location.LuaRestyWAF.Mode) > 0 { + return true + } + } + } + return false + }() + if luaRestyWAFEnabled { + out = append(out, "lua_shared_dict waf_storage 64M") + } + } + + return strings.Join(out, ";\n\r") + ";" +} + +func buildResolversForLua(res interface{}, disableIpv6 interface{}) string { + nss, ok := res.([]net.IP) + if !ok { + klog.Errorf("expected a '[]net.IP' type but %T was returned", res) + return "" + } + no6, ok := disableIpv6.(bool) + if !ok { + klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) + return "" + } + + if len(nss) == 0 { + return "" + } + + r := []string{} + for _, ns := range nss { + if ing_net.IsIPV6(ns) { + if no6 { + continue + } + r = append(r, fmt.Sprintf("\"[%v]\"", ns)) + } else { + r = append(r, fmt.Sprintf("\"%v\"", ns)) + } + } + + return strings.Join(r, ", ") +} + +// configForLua returns some general configuration as Lua table represented as string +func configForLua(input interface{}) string { + all, ok := input.(config.TemplateConfig) + if !ok { + klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", input) + return "{}" + } + + return fmt.Sprintf(`{ + use_forwarded_headers = %t, + is_ssl_passthrough_enabled = %t, + http_redirect_code = %v, + listen_ports = { ssl_proxy = "%v", https = "%v" }, + }`, all.Cfg.UseForwardedHeaders, all.IsSSLPassthroughEnabled, all.Cfg.HTTPRedirectCode, all.ListenPorts.SSLProxy, all.ListenPorts.HTTPS) +} + +// locationConfigForLua formats some location specific configuration into Lua table represented as string +func locationConfigForLua(l interface{}, s interface{}, a interface{}) string { + location, ok := l.(*ingress.Location) + if !ok { + klog.Errorf("expected an '*ingress.Location' type but %T was given", l) + return "{}" + } + + server, ok := s.(*ingress.Server) + if !ok { + klog.Errorf("expected an '*ingress.Server' type but %T was given", s) + return "{}" + } + + all, ok := a.(config.TemplateConfig) + if !ok { + klog.Errorf("expected a 
'config.TemplateConfig' type but %T was given", a) + return "{}" + } + + forceSSLRedirect := location.Rewrite.ForceSSLRedirect || (len(server.SSLCert.PemFileName) > 0 && location.Rewrite.SSLRedirect) + forceSSLRedirect = forceSSLRedirect && !isLocationInLocationList(l, all.Cfg.NoTLSRedirectLocations) + + return fmt.Sprintf(`{ + force_ssl_redirect = %t, + use_port_in_redirects = %t, + }`, forceSSLRedirect, location.UsePortInRedirects) +} + // buildResolvers returns the resolvers reading the /etc/resolv.conf file -func buildResolvers(input interface{}) string { +func buildResolvers(res interface{}, disableIpv6 interface{}) string { // NGINX need IPV6 addresses to be surrounded by brackets - nss, ok := input.([]net.IP) + nss, ok := res.([]net.IP) + if !ok { + klog.Errorf("expected a '[]net.IP' type but %T was returned", res) + return "" + } + no6, ok := disableIpv6.(bool) if !ok { - glog.Errorf("expected a '[]net.IP' type but %T was returned", input) + klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) return "" } @@ -178,73 +338,107 @@ func buildResolvers(input interface{}) string { r := []string{"resolver"} for _, ns := range nss { if ing_net.IsIPV6(ns) { + if no6 { + continue + } r = append(r, fmt.Sprintf("[%v]", ns)) } else { r = append(r, fmt.Sprintf("%v", ns)) } } - r = append(r, "valid=30s;") + r = append(r, "valid=30s") + + if no6 { + r = append(r, "ipv6=off") + } + + return strings.Join(r, " ") + ";" +} + +func needsRewrite(location *ingress.Location) bool { + if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != location.Path { + return true + } + return false +} - return strings.Join(r, " ") +func stripLocationModifer(path string) string { + return strings.TrimLeft(path, "~* ") +} + +// enforceRegexModifier checks if the "rewrite-target" or "use-regex" annotation +// is used on any location path within a server +func enforceRegexModifier(input interface{}) bool { + locations, ok := input.([]*ingress.Location) + if !ok { + klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input) + return false + } + + for _, location := range locations { + if needsRewrite(location) || location.Rewrite.UseRegex { + return true + } + } + return false } // buildLocation produces the location string, if the ingress has redirects -// (specified through the nginx.ingress.kubernetes.io/rewrite-to annotation) -func buildLocation(input interface{}) string { +// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation) +func buildLocation(input interface{}, enforceRegex bool) string { location, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return slash } path := location.Path - if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != path { - if path == slash { - return fmt.Sprintf("~* %s", path) - } - // baseuri regex will parse basename from the given location - baseuri := `(?.*)` - if !strings.HasSuffix(path, slash) { - // Not treat the slash after "location path" as a part of baseuri - baseuri = fmt.Sprintf(`\/?%s`, baseuri) - } - return fmt.Sprintf(`~* ^%s%s`, path, baseuri) + if enforceRegex { + return fmt.Sprintf(`~* "^%s"`, path) } - return path } -// TODO: Needs Unit Tests -func buildAuthLocation(input interface{}) string { +func buildAuthLocation(input interface{}, globalExternalAuthURL string) string { location, ok := input.(*ingress.Location) if !ok { - 
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return "" } - if location.ExternalAuth.URL == "" { + if (location.ExternalAuth.URL == "") && (!shouldApplyGlobalAuth(input, globalExternalAuthURL)) { return "" } str := base64.URLEncoding.EncodeToString([]byte(location.Path)) - // avoid locations containing the = char + // removes "=" after encoding str = strings.Replace(str, "=", "", -1) return fmt.Sprintf("/_external-auth-%v", str) } -func buildAuthResponseHeaders(input interface{}) []string { +// shouldApplyGlobalAuth returns true only in case when ExternalAuth.URL is not set and +// GlobalExternalAuth is set and enabled +func shouldApplyGlobalAuth(input interface{}, globalExternalAuthURL string) bool { location, ok := input.(*ingress.Location) - res := []string{} if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) - return res + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) } - if len(location.ExternalAuth.ResponseHeaders) == 0 { + if (location.ExternalAuth.URL == "") && (globalExternalAuthURL != "") && (location.EnableGlobalAuth) { + return true + } + + return false +} + +func buildAuthResponseHeaders(headers []string) []string { + res := []string{} + + if len(headers) == 0 { return res } - for i, h := range location.ExternalAuth.ResponseHeaders { + for i, h := range headers { hvar := strings.ToLower(h) hvar = strings.NewReplacer("-", "_").Replace(hvar) res = append(res, fmt.Sprintf("auth_request_set $authHeader%v $upstream_http_%v;", i, hvar)) @@ -256,7 +450,7 @@ func buildAuthResponseHeaders(input interface{}) []string { func buildLogFormatUpstream(input interface{}) string { cfg, ok := input.(config.Configuration) if !ok { - glog.Errorf("expected a 'config.Configuration' type but %T was returned", input) + klog.Errorf("expected a 'config.Configuration' type but %T was returned", input) return "" } @@ -264,34 +458,51 @@ func buildLogFormatUpstream(input interface{}) string { } // buildProxyPass produces the proxy pass string, if the ingress has redirects -// (specified through the nginx.ingress.kubernetes.io/rewrite-to annotation) +// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation) // If the annotation nginx.ingress.kubernetes.io/add-base-url:"true" is specified it will // add a base tag in the head of the response from the service func buildProxyPass(host string, b interface{}, loc interface{}) string { backends, ok := b.([]*ingress.Backend) if !ok { - glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) + klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) return "" } location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "" } path := location.Path - proto := "http" + proto := "http://" + + proxyPass := "proxy_pass" + + switch location.BackendProtocol { + case "HTTPS": + proto = "https://" + case "GRPC": + proto = "grpc://" + proxyPass = "grpc_pass" + case "GRPCS": + proto = "grpcs://" + proxyPass = "grpc_pass" + case "AJP": + proto = "" + proxyPass = "ajp_pass" + } + + upstreamName := "upstream_balancer" - upstreamName := location.Backend for _, backend := range backends { if backend.Name == location.Backend { - if backend.Secure || backend.SSLPassthrough { - proto = 
"https" - } + if backend.SSLPassthrough { + proto = "https://" - if isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) { - upstreamName = fmt.Sprintf("sticky-%v", upstreamName) + if location.BackendProtocol == "GRPCS" { + proto = "grpcs://" + } } break @@ -299,45 +510,23 @@ func buildProxyPass(host string, b interface{}, loc interface{}) string { } // defProxyPass returns the default proxy_pass, just the name of the upstream - defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName) + defProxyPass := fmt.Sprintf("%v %s%s;", proxyPass, proto, upstreamName) + // if the path in the ingress rule is equals to the target: no special rewrite if path == location.Rewrite.Target { return defProxyPass } - if !strings.HasSuffix(path, slash) { - path = fmt.Sprintf("%s/", path) - } - if len(location.Rewrite.Target) > 0 { - abu := "" - if location.Rewrite.AddBaseURL { - // path has a slash suffix, so that it can be connected with baseuri directly - bPath := fmt.Sprintf("%s%s", path, "$baseuri") - regex := `(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)` - if len(location.Rewrite.BaseURLScheme) > 0 { - abu = fmt.Sprintf(`subs_filter '%v' '$1' ro; - `, regex, location.Rewrite.BaseURLScheme, bPath) - } else { - abu = fmt.Sprintf(`subs_filter '%v' '$1' ro; - `, regex, bPath) - } - } + var xForwardedPrefix string - if location.Rewrite.Target == slash { - // special case redirect to / - // ie /something to / - return fmt.Sprintf(` - rewrite %s(.*) /$1 break; - rewrite %s / break; - proxy_pass %s://%s; - %v`, path, location.Path, proto, upstreamName, abu) + if len(location.XForwardedPrefix) > 0 { + xForwardedPrefix = fmt.Sprintf("proxy_set_header X-Forwarded-Prefix \"%s\";\n", location.XForwardedPrefix) } return fmt.Sprintf(` - rewrite %s(.*) %s/$1 break; - proxy_pass %s://%s; - %v`, path, location.Rewrite.Target, proto, upstreamName, abu) +rewrite "(?i)%s" %s break; +%v%v %s%s;`, path, location.Rewrite.Target, xForwardedPrefix, proxyPass, proto, upstreamName) } // default proxy_pass @@ -351,7 +540,7 @@ func filterRateLimits(input interface{}) []ratelimit.Config { servers, ok := input.([]*ingress.Server) if !ok { - glog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input) + klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input) return ratelimits } for _, server := range servers { @@ -375,7 +564,7 @@ func buildRateLimitZones(input interface{}) []string { servers, ok := input.([]*ingress.Server) if !ok { - glog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input) + klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input) return zones.List() } @@ -425,7 +614,7 @@ func buildRateLimit(input interface{}) []string { loc, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return limits } @@ -462,10 +651,32 @@ func buildRateLimit(input interface{}) []string { return limits } +func isLocationInLocationList(location interface{}, rawLocationList string) bool { + loc, ok := location.(*ingress.Location) + if !ok { + klog.Errorf("expected an '*ingress.Location' type but %T was returned", location) + return false + } + + locationList := strings.Split(rawLocationList, ",") + + for _, locationListItem := range locationList { + locationListItem = strings.Trim(locationListItem, " ") + if locationListItem == "" { + continue + } + if 
strings.HasPrefix(loc.Path, locationListItem) { + return true + } + } + + return false +} + func isLocationAllowed(input interface{}) bool { loc, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return false } @@ -484,65 +695,33 @@ var ( func buildDenyVariable(a interface{}) string { l, ok := a.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", a) + klog.Errorf("expected a 'string' type but %T was returned", a) return "" } if _, ok := denyPathSlugMap[l]; !ok { - denyPathSlugMap[l] = buildRandomUUID() + denyPathSlugMap[l] = randomString() } return fmt.Sprintf("$deny_%v", denyPathSlugMap[l]) } -// TODO: Needs Unit Tests -func buildUpstreamName(host string, b interface{}, loc interface{}) string { - - backends, ok := b.([]*ingress.Backend) - if !ok { - glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) - return "" - } - +func buildUpstreamName(loc interface{}) string { location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "" } upstreamName := location.Backend - for _, backend := range backends { - if backend.Name == location.Backend { - if backend.SessionAffinity.AffinityType == "cookie" && - isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) { - upstreamName = fmt.Sprintf("sticky-%v", upstreamName) - } - - break - } - } - return upstreamName } -// TODO: Needs Unit Tests -func isSticky(host string, loc *ingress.Location, stickyLocations map[string][]string) bool { - if _, ok := stickyLocations[host]; ok { - for _, sl := range stickyLocations[host] { - if sl == loc.Path { - return true - } - } - } - - return false -} - func buildNextUpstream(i, r interface{}) string { nextUpstream, ok := i.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", i) + klog.Errorf("expected a 'string' type but %T was returned", i) return "" } @@ -568,44 +747,32 @@ func buildNextUpstream(i, r interface{}) string { return strings.Join(nextUpstreamCodes, " ") } -// buildRandomUUID return a random string to be used in the template -func buildRandomUUID() string { - s := uuid.New() - return strings.Replace(s, "-", "", -1) -} +// refer to http://nginx.org/en/docs/syntax.html +// Nginx differentiates between size and offset +// offset directives support gigabytes in addition +var nginxSizeRegex = regexp.MustCompile("^[0-9]+[kKmM]{0,1}$") +var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$") -func isValidClientBodyBufferSize(input interface{}) bool { +// isValidByteSize validates size units valid in nginx +// http://nginx.org/en/docs/syntax.html +func isValidByteSize(input interface{}, isOffset bool) bool { s, ok := input.(string) if !ok { - glog.Errorf("expected an 'string' type but %T was returned", input) + klog.Errorf("expected an 'string' type but %T was returned", input) return false } + s = strings.TrimSpace(s) if s == "" { + klog.V(2).Info("empty byte size, hence it will not be set") return false } - _, err := strconv.Atoi(s) - if err != nil { - sLowercase := strings.ToLower(s) - - kCheck := strings.TrimSuffix(sLowercase, "k") - _, err := strconv.Atoi(kCheck) - if err == nil { - return true - } - - mCheck := strings.TrimSuffix(sLowercase, "m") - _, err = strconv.Atoi(mCheck) - if err == 
nil { - return true - } - - glog.Errorf("client-body-buffer-size '%v' was provided in an incorrect format, hence it will not be set.", s) - return false + if isOffset { + return nginxOffsetRegex.MatchString(s) } - return true + return nginxSizeRegex.MatchString(s) } type ingressInformation struct { @@ -615,16 +782,39 @@ type ingressInformation struct { Annotations map[string]string } -func getIngressInformation(i, p interface{}) *ingressInformation { - ing, ok := i.(*extensions.Ingress) +func (info *ingressInformation) Equal(other *ingressInformation) bool { + if info.Namespace != other.Namespace { + return false + } + if info.Rule != other.Rule { + return false + } + if info.Service != other.Service { + return false + } + if !reflect.DeepEqual(info.Annotations, other.Annotations) { + return false + } + + return true +} + +func getIngressInformation(i, h, p interface{}) *ingressInformation { + ing, ok := i.(*ingress.Ingress) + if !ok { + klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i) + return &ingressInformation{} + } + + hostname, ok := h.(string) if !ok { - glog.Errorf("expected an '*extensions.Ingress' type but %T was returned", i) + klog.Errorf("expected a 'string' type but %T was returned", h) return &ingressInformation{} } path, ok := p.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", p) + klog.Errorf("expected a 'string' type but %T was returned", p) return &ingressInformation{} } @@ -647,6 +837,10 @@ func getIngressInformation(i, p interface{}) *ingressInformation { continue } + if hostname != "" && hostname != rule.Host { + continue + } + for _, rPath := range rule.HTTP.Paths { if path == rPath.Path { info.Service = rPath.Backend.ServiceName @@ -661,7 +855,7 @@ func getIngressInformation(i, p interface{}) *ingressInformation { func buildForwardedFor(input interface{}) string { s, ok := input.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", input) + klog.Errorf("expected a 'string' type but %T was returned", input) return "" } @@ -673,19 +867,214 @@ func buildForwardedFor(input interface{}) string { func buildAuthSignURL(input interface{}) string { s, ok := input.(string) if !ok { - glog.Errorf("expected an 'string' type but %T was returned", input) + klog.Errorf("expected an 'string' type but %T was returned", input) return "" } u, _ := url.Parse(s) q := u.Query() if len(q) == 0 { - return fmt.Sprintf("%v?rd=$pass_access_scheme://$http_host$request_uri", s) + return fmt.Sprintf("%v?rd=$pass_access_scheme://$http_host$escaped_request_uri", s) } if q.Get("rd") != "" { return s } - return fmt.Sprintf("%v&rd=$pass_access_scheme://$http_host$request_uri", s) + return fmt.Sprintf("%v&rd=$pass_access_scheme://$http_host$escaped_request_uri", s) +} + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func randomString() string { + b := make([]rune, 32) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + + return string(b) +} + +func buildOpentracing(input interface{}) string { + cfg, ok := input.(config.Configuration) + if !ok { + klog.Errorf("expected a 'config.Configuration' type but %T was returned", input) + return "" + } + + if !cfg.EnableOpentracing { + return "" + } + + buf := bytes.NewBufferString("") + if cfg.ZipkinCollectorHost != "" { + buf.WriteString("opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;") + } else if cfg.JaegerCollectorHost != "" { 
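+		// Reached only when no Zipkin collector host is set: the tracers are
+		// mutually exclusive, with Zipkin taking precedence, then Jaeger,
+		// then Datadog.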
+ buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;") + } else if cfg.DatadogCollectorHost != "" { + buf.WriteString("opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;") + } + + buf.WriteString("\r\n") + + return buf.String() +} + +// buildInfluxDB produces the single line configuration +// needed by the InfluxDB module to send request's metrics +// for the current resource +func buildInfluxDB(input interface{}) string { + cfg, ok := input.(influxdb.Config) + if !ok { + klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input) + return "" + } + + if !cfg.InfluxDBEnabled { + return "" + } + + return fmt.Sprintf( + "influxdb server_name=%s host=%s port=%s measurement=%s enabled=true;", + cfg.InfluxDBServerName, + cfg.InfluxDBHost, + cfg.InfluxDBPort, + cfg.InfluxDBMeasurement, + ) +} + +func proxySetHeader(loc interface{}) string { + location, ok := loc.(*ingress.Location) + if !ok { + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + return "proxy_set_header" + } + + if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" { + return "grpc_set_header" + } + + return "proxy_set_header" +} + +// buildCustomErrorDeps is a utility function returning a struct wrapper with +// the data required to build the 'CUSTOM_ERRORS' template +func buildCustomErrorDeps(upstreamName string, errorCodes []int, enableMetrics bool) interface{} { + return struct { + UpstreamName string + ErrorCodes []int + EnableMetrics bool + }{ + UpstreamName: upstreamName, + ErrorCodes: errorCodes, + EnableMetrics: enableMetrics, + } +} + +type errorLocation struct { + UpstreamName string + Codes []int +} + +// buildCustomErrorLocationsPerServer is a utility function which will collect all +// custom error codes for all locations of a server block, deduplicates them, +// and returns a set which is unique by default-upstream and error code. 
It returns an array +// of errorLocations, each of which contain the upstream name and a list of +// error codes for that given upstream, so that sufficiently unique +// @custom error location blocks can be created in the template +func buildCustomErrorLocationsPerServer(input interface{}) interface{} { + server, ok := input.(*ingress.Server) + if !ok { + klog.Errorf("expected a '*ingress.Server' type but %T was returned", input) + return nil + } + + codesMap := make(map[string]map[int]bool) + for _, loc := range server.Locations { + backendUpstream := loc.DefaultBackendUpstreamName + + var dedupedCodes map[int]bool + if existingMap, ok := codesMap[backendUpstream]; ok { + dedupedCodes = existingMap + } else { + dedupedCodes = make(map[int]bool) + } + + for _, code := range loc.CustomHTTPErrors { + dedupedCodes[code] = true + } + codesMap[backendUpstream] = dedupedCodes + } + + errorLocations := []errorLocation{} + + for upstream, dedupedCodes := range codesMap { + codesForUpstream := []int{} + for code := range dedupedCodes { + codesForUpstream = append(codesForUpstream, code) + } + sort.Ints(codesForUpstream) + errorLocations = append(errorLocations, errorLocation{ + UpstreamName: upstream, + Codes: codesForUpstream, + }) + } + + sort.Slice(errorLocations, func(i, j int) bool { + return errorLocations[i].UpstreamName < errorLocations[j].UpstreamName + }) + + return errorLocations +} + +func opentracingPropagateContext(loc interface{}) string { + location, ok := loc.(*ingress.Location) + if !ok { + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + return "opentracing_propagate_context" + } + + if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" { + return "opentracing_grpc_propagate_context" + } + + return "opentracing_propagate_context" +} + +// shouldLoadModSecurityModule determines whether or not the ModSecurity module needs to be loaded. +// First, it checks if `enable-modsecurity` is set in the ConfigMap. If it is not, it iterates over all locations to +// check if ModSecurity is enabled by the annotation `nginx.ingress.kubernetes.io/enable-modsecurity`. +func shouldLoadModSecurityModule(c interface{}, s interface{}) bool { + cfg, ok := c.(config.Configuration) + if !ok { + klog.Errorf("expected a 'config.Configuration' type but %T was returned", c) + return false + } + + servers, ok := s.([]*ingress.Server) + if !ok { + klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s) + return false + } + + // Determine if ModSecurity is enabled globally. + if cfg.EnableModsecurity { + return true + } + + // If ModSecurity is not enabled globally, check if any location has it enabled via annotation. + for _, server := range servers { + for _, location := range server.Locations { + if location.ModSecurity.Enable { + return true + } + } + } + + // Not enabled globally nor via annotation on a location, no need to load the module. + return false } diff --git a/internal/ingress/controller/template/template_test.go b/internal/ingress/controller/template/template_test.go index 1e4c9b75bf..c0f7a221ea 100644 --- a/internal/ingress/controller/template/template_test.go +++ b/internal/ingress/controller/template/template_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package template import ( - "encoding/json" "io/ioutil" "net" "os" @@ -26,77 +25,181 @@ import ( "strings" "testing" + "encoding/base64" + "fmt" + + jsoniter "github.com/json-iterator/go" + networking "k8s.io/api/networking/v1beta1" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress/annotations/authreq" + "k8s.io/ingress-nginx/internal/ingress/annotations/influxdb" + "k8s.io/ingress-nginx/internal/ingress/annotations/luarestywaf" + "k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity" + "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/rewrite" "k8s.io/ingress-nginx/internal/ingress/controller/config" ) var ( - // TODO: add tests for secure endpoints + // TODO: add tests for SSLPassthrough tmplFuncTestcases = map[string]struct { - Path string - Target string - Location string - ProxyPass string - AddBaseURL bool - BaseURLScheme string - Sticky bool + Path string + Target string + Location string + ProxyPass string + Sticky bool + XForwardedPrefix string + SecureBackend bool + enforceRegex bool }{ - "invalid redirect / to /": {"/", "/", "/", "proxy_pass http://upstream-name;", false, "", false}, - "redirect / to /jenkins": {"/", "/jenkins", "~* /", + "when secure backend enabled": { + "/", + "/", + "/", + "proxy_pass https://upstream_balancer;", + false, + "", + true, + false, + }, + "when secure backend and dynamic config enabled": { + "/", + "/", + "/", + "proxy_pass https://upstream_balancer;", + false, + "", + true, + false, + }, + "when secure backend, stickeness and dynamic config enabled": { + "/", + "/", + "/", + "proxy_pass https://upstream_balancer;", + true, + "", + true, + false, + }, + "invalid redirect / to / with dynamic config enabled": { + "/", + "/", + "/", + "proxy_pass http://upstream_balancer;", + false, + "", + false, + false, + }, + "invalid redirect / to /": { + "/", + "/", + "/", + "proxy_pass http://upstream_balancer;", + false, + "", + false, + false, + }, + "redirect / to /jenkins": { + "/", + "/jenkins", + `~* "^/"`, + ` +rewrite "(?i)/" /jenkins break; +proxy_pass http://upstream_balancer;`, + false, + "", + false, + true, + }, + "redirect / to /something with sticky enabled": { + "/", + "/something", + `~* "^/"`, + ` +rewrite "(?i)/" /something break; +proxy_pass http://upstream_balancer;`, + true, + "", + false, + true, + }, + "redirect / to /something with sticky and dynamic config enabled": { + "/", + "/something", + `~* "^/"`, ` - rewrite /(.*) /jenkins/$1 break; - proxy_pass http://upstream-name; - `, false, "", false}, - "redirect /something to /": {"/something", "/", `~* ^/something\/?(?.*)`, ` - rewrite /something/(.*) /$1 break; - rewrite /something / break; - proxy_pass http://upstream-name; - `, false, "", false}, - "redirect /end-with-slash/ to /not-root": {"/end-with-slash/", "/not-root", "~* ^/end-with-slash/(?.*)", ` - rewrite /end-with-slash/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - `, false, "", false}, - "redirect /something-complex to /not-root": {"/something-complex", "/not-root", `~* ^/something-complex\/?(?.*)`, ` - rewrite /something-complex/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - `, false, "", false}, - "redirect / to /jenkins and rewrite": {"/", "/jenkins", "~* /", ` - rewrite /(.*) /jenkins/$1 break; - proxy_pass http://upstream-name; - subs_filter '(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)' '$1' ro; - `, true, "", false}, - "redirect 
/something to / and rewrite": {"/something", "/", `~* ^/something\/?(?.*)`, ` - rewrite /something/(.*) /$1 break; - rewrite /something / break; - proxy_pass http://upstream-name; - subs_filter '(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)' '$1' ro; - `, true, "", false}, - "redirect /end-with-slash/ to /not-root and rewrite": {"/end-with-slash/", "/not-root", `~* ^/end-with-slash/(?.*)`, ` - rewrite /end-with-slash/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - subs_filter '(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)' '$1' ro; - `, true, "", false}, - "redirect /something-complex to /not-root and rewrite": {"/something-complex", "/not-root", `~* ^/something-complex\/?(?.*)`, ` - rewrite /something-complex/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - subs_filter '(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)' '$1' ro; - `, true, "", false}, - "redirect /something to / and rewrite with specific scheme": {"/something", "/", `~* ^/something\/?(?.*)`, ` - rewrite /something/(.*) /$1 break; - rewrite /something / break; - proxy_pass http://upstream-name; - subs_filter '(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)' '$1' ro; - `, true, "http", false}, - "redirect / to /something with sticky enabled": {"/", "/something", `~* /`, ` - rewrite /(.*) /something/$1 break; - proxy_pass http://sticky-upstream-name; - `, false, "http", true}, +rewrite "(?i)/" /something break; +proxy_pass http://upstream_balancer;`, + true, + "", + false, + true, + }, + "add the X-Forwarded-Prefix header": { + "/there", + "/something", + `~* "^/there"`, + ` +rewrite "(?i)/there" /something break; +proxy_set_header X-Forwarded-Prefix "/there"; +proxy_pass http://upstream_balancer;`, + true, + "/there", + false, + true, + }, + "use ~* location modifier when ingress does not use rewrite/regex target but at least one other ingress does": { + "/something", + "/something", + `~* "^/something"`, + "proxy_pass http://upstream_balancer;", + false, + "", + false, + true, + }, } ) +func TestBuildLuaSharedDictionaries(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildLuaSharedDictionaries(invalidType, true) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + servers := []*ingress.Server{ + { + Hostname: "foo.bar", + Locations: []*ingress.Location{{Path: "/", LuaRestyWAF: luarestywaf.Config{}}}, + }, + { + Hostname: "another.host", + Locations: []*ingress.Location{{Path: "/", LuaRestyWAF: luarestywaf.Config{}}}, + }, + } + + configuration := buildLuaSharedDictionaries(servers, false) + if !strings.Contains(configuration, "lua_shared_dict configuration_data") { + t.Errorf("expected to include 'configuration_data' but got %s", configuration) + } + if strings.Contains(configuration, "waf_storage") { + t.Errorf("expected to not include 'waf_storage' but got %s", configuration) + } + + servers[1].Locations[0].LuaRestyWAF = luarestywaf.Config{Mode: "ACTIVE"} + configuration = buildLuaSharedDictionaries(servers, false) + if !strings.Contains(configuration, "lua_shared_dict waf_storage") { + t.Errorf("expected to configure 'waf_storage', but got %s", configuration) + } +} + func TestFormatIP(t *testing.T) { cases := map[string]struct { Input, Output string @@ -117,13 +220,21 @@ func TestFormatIP(t *testing.T) { } func TestBuildLocation(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "/" + actual := buildLocation(invalidType, true) + + if !reflect.DeepEqual(expected, 
actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + for k, tc := range tmplFuncTestcases { loc := &ingress.Location{ Path: tc.Path, - Rewrite: rewrite.Config{Target: tc.Target, AddBaseURL: tc.AddBaseURL}, + Rewrite: rewrite.Config{Target: tc.Target}, } - newLoc := buildLocation(loc) + newLoc := buildLocation(loc, tc.enforceRegex) if tc.Location != newLoc { t.Errorf("%s: expected '%v' but returned %v", k, tc.Location, newLoc) } @@ -136,28 +247,33 @@ func TestBuildProxyPass(t *testing.T) { for k, tc := range tmplFuncTestcases { loc := &ingress.Location{ - Path: tc.Path, - Rewrite: rewrite.Config{Target: tc.Target, AddBaseURL: tc.AddBaseURL, BaseURLScheme: tc.BaseURLScheme}, - Backend: defaultBackend, + Path: tc.Path, + Rewrite: rewrite.Config{Target: tc.Target}, + Backend: defaultBackend, + XForwardedPrefix: tc.XForwardedPrefix, + } + + if tc.SecureBackend { + loc.BackendProtocol = "HTTPS" + } + + backend := &ingress.Backend{ + Name: defaultBackend, } - backends := []*ingress.Backend{} if tc.Sticky { - backends = []*ingress.Backend{ - { - Name: defaultBackend, - SessionAffinity: ingress.SessionAffinityConfig{ - AffinityType: "cookie", - CookieSessionAffinity: ingress.CookieSessionAffinity{ - Locations: map[string][]string{ - defaultHost: {tc.Path}, - }, - }, + backend.SessionAffinity = ingress.SessionAffinityConfig{ + AffinityType: "cookie", + CookieSessionAffinity: ingress.CookieSessionAffinity{ + Locations: map[string][]string{ + defaultHost: {tc.Path}, }, }, } } + backends := []*ingress.Backend{backend} + pp := buildProxyPass(defaultHost, backends, loc) if !strings.EqualFold(tc.ProxyPass, pp) { t.Errorf("%s: expected \n'%v'\nbut returned \n'%v'", k, tc.ProxyPass, pp) @@ -165,11 +281,100 @@ func TestBuildProxyPass(t *testing.T) { } } -func TestBuildAuthResponseHeaders(t *testing.T) { +func TestBuildAuthLocation(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildAuthLocation(invalidType, "") + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + authURL := "foo.com/auth" + globalAuthURL := "foo.com/global-auth" + loc := &ingress.Location{ - ExternalAuth: authreq.Config{ResponseHeaders: []string{"h1", "H-With-Caps-And-Dashes"}}, + ExternalAuth: authreq.Config{ + URL: authURL, + }, + Path: "/cat", + EnableGlobalAuth: true, } - headers := buildAuthResponseHeaders(loc) + + encodedAuthURL := strings.Replace(base64.URLEncoding.EncodeToString([]byte(loc.Path)), "=", "", -1) + externalAuthPath := fmt.Sprintf("/_external-auth-%v", encodedAuthURL) + + testCases := []struct { + title string + authURL string + globalAuthURL string + enableglobalExternalAuth bool + expected string + }{ + {"authURL, globalAuthURL and enabled", authURL, globalAuthURL, true, externalAuthPath}, + {"authURL, globalAuthURL and disabled", authURL, globalAuthURL, false, externalAuthPath}, + {"authURL, empty globalAuthURL and enabled", authURL, "", true, externalAuthPath}, + {"authURL, empty globalAuthURL and disabled", authURL, "", false, externalAuthPath}, + {"globalAuthURL and enabled", "", globalAuthURL, true, externalAuthPath}, + {"globalAuthURL and disabled", "", globalAuthURL, false, ""}, + {"all empty and enabled", "", "", true, ""}, + {"all empty and disabled", "", "", false, ""}, + } + + for _, testCase := range testCases { + loc.ExternalAuth.URL = testCase.authURL + loc.EnableGlobalAuth = testCase.enableglobalExternalAuth + + str := buildAuthLocation(loc, testCase.globalAuthURL) + if str != 
testCase.expected { + t.Errorf("%v: expected '%v' but returned '%v'", testCase.title, testCase.expected, str) + } + } +} + +func TestShouldApplyGlobalAuth(t *testing.T) { + + authURL := "foo.com/auth" + globalAuthURL := "foo.com/global-auth" + + loc := &ingress.Location{ + ExternalAuth: authreq.Config{ + URL: authURL, + }, + Path: "/cat", + EnableGlobalAuth: true, + } + + testCases := []struct { + title string + authURL string + globalAuthURL string + enableglobalExternalAuth bool + expected bool + }{ + {"authURL, globalAuthURL and enabled", authURL, globalAuthURL, true, false}, + {"authURL, globalAuthURL and disabled", authURL, globalAuthURL, false, false}, + {"authURL, empty globalAuthURL and enabled", authURL, "", true, false}, + {"authURL, empty globalAuthURL and disabled", authURL, "", false, false}, + {"globalAuthURL and enabled", "", globalAuthURL, true, true}, + {"globalAuthURL and disabled", "", globalAuthURL, false, false}, + {"all empty and enabled", "", "", true, false}, + {"all empty and disabled", "", "", false, false}, + } + + for _, testCase := range testCases { + loc.ExternalAuth.URL = testCase.authURL + loc.EnableGlobalAuth = testCase.enableglobalExternalAuth + + result := shouldApplyGlobalAuth(loc, testCase.globalAuthURL) + if result != testCase.expected { + t.Errorf("%v: expected '%v' but returned '%v'", testCase.title, testCase.expected, result) + } + } +} + +func TestBuildAuthResponseHeaders(t *testing.T) { + externalAuthResponseHeaders := []string{"h1", "H-With-Caps-And-Dashes"} expected := []string{ "auth_request_set $authHeader0 $upstream_http_h1;", "proxy_set_header 'h1' $authHeader0;", @@ -177,6 +382,8 @@ func TestBuildAuthResponseHeaders(t *testing.T) { "proxy_set_header 'H-With-Caps-And-Dashes' $authHeader1;", } + headers := buildAuthResponseHeaders(externalAuthResponseHeaders) + if !reflect.DeepEqual(expected, headers) { t.Errorf("Expected \n'%v'\nbut returned \n'%v'", expected, headers) } @@ -194,7 +401,7 @@ func TestTemplateWithData(t *testing.T) { t.Error("unexpected error reading json file: ", err) } var dat config.TemplateConfig - if err := json.Unmarshal(data, &dat); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, &dat); err != nil { t.Errorf("unexpected error unmarshalling json: %v", err) } if dat.ListenPorts == nil { @@ -211,10 +418,22 @@ func TestTemplateWithData(t *testing.T) { t.Errorf("invalid NGINX template: %v", err) } - _, err = ngxTpl.Write(dat) + rt, err := ngxTpl.Write(dat) if err != nil { t.Errorf("invalid NGINX template: %v", err) } + + if !strings.Contains(string(rt), "listen [2001:db8:a0b:12f0::1]") { + t.Errorf("invalid NGINX template, expected IPV6 listen address not present") + } + + if !strings.Contains(string(rt), "listen [3731:54:65fe:2::a7]") { + t.Errorf("invalid NGINX template, expected IPV6 listen address not present") + } + + if !strings.Contains(string(rt), "listen 2.2.2.2") { + t.Errorf("invalid NGINX template, expected IPV4 listen address not present") + } } func BenchmarkTemplateWithData(b *testing.B) { @@ -229,7 +448,7 @@ func BenchmarkTemplateWithData(b *testing.B) { b.Error("unexpected error reading json file: ", err) } var dat config.TemplateConfig - if err := json.Unmarshal(data, &dat); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, &dat); err != nil { b.Errorf("unexpected error unmarshalling json: %v", err) } @@ -249,6 +468,14 @@ func BenchmarkTemplateWithData(b *testing.B) { } func TestBuildDenyVariable(t *testing.T) { + invalidType := 
&ingress.Ingress{} + expected := "" + actual := buildDenyVariable(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + a := buildDenyVariable("host1.example.com_/.well-known/acme-challenge") b := buildDenyVariable("host1.example.com_/.well-known/acme-challenge") if !reflect.DeepEqual(a, b) { @@ -256,46 +483,45 @@ func TestBuildDenyVariable(t *testing.T) { } } -func TestBuildClientBodyBufferSize(t *testing.T) { - a := isValidClientBodyBufferSize("1000") - if a != true { - t.Errorf("Expected '%v' but returned '%v'", true, a) - } - b := isValidClientBodyBufferSize("1000k") - if b != true { - t.Errorf("Expected '%v' but returned '%v'", true, b) - } - c := isValidClientBodyBufferSize("1000m") - if c != true { - t.Errorf("Expected '%v' but returned '%v'", true, c) - } - d := isValidClientBodyBufferSize("1000km") - if d != false { - t.Errorf("Expected '%v' but returned '%v'", false, d) - } - e := isValidClientBodyBufferSize("1000mk") - if e != false { - t.Errorf("Expected '%v' but returned '%v'", false, e) - } - f := isValidClientBodyBufferSize("1000kk") - if f != false { - t.Errorf("Expected '%v' but returned '%v'", false, f) - } - g := isValidClientBodyBufferSize("1000mm") - if g != false { - t.Errorf("Expected '%v' but returned '%v'", false, g) - } - h := isValidClientBodyBufferSize(nil) - if h != false { - t.Errorf("Expected '%v' but returned '%v'", false, h) +func TestBuildByteSize(t *testing.T) { + cases := []struct { + value interface{} + isOffset bool + expected bool + }{ + {"1000", false, true}, + {"1000k", false, true}, + {"1m", false, true}, + {"10g", false, false}, + {" 1m ", false, true}, + {"1000kk", false, false}, + {"1000km", false, false}, + {"1mm", false, false}, + {nil, false, false}, + {"", false, false}, + {" ", false, false}, + {"1G", true, true}, + {"1000kk", true, false}, + {"", true, false}, } - i := isValidClientBodyBufferSize("") - if i != false { - t.Errorf("Expected '%v' but returned '%v'", false, i) + + for _, tc := range cases { + val := isValidByteSize(tc.value, tc.isOffset) + if tc.expected != val { + t.Errorf("Expected '%v' but returned '%v'", tc.expected, val) + } } } func TestIsLocationAllowed(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := isLocationAllowed(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + loc := ingress.Location{ Denied: nil, } @@ -307,13 +533,57 @@ func TestIsLocationAllowed(t *testing.T) { } func TestBuildForwardedFor(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildForwardedFor(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + inputStr := "X-Forwarded-For" - outputStr := buildForwardedFor(inputStr) + expected = "$http_x_forwarded_for" + actual = buildForwardedFor(inputStr) - validStr := "$http_x_forwarded_for" + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestBuildResolversForLua(t *testing.T) { + + ipOne := net.ParseIP("192.0.0.1") + ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000") + ipList := []net.IP{ipOne, ipTwo} - if outputStr != validStr { - t.Errorf("Expected '%v' but returned '%v'", validStr, outputStr) + invalidType := &ingress.Ingress{} + expected := "" + actual := buildResolversForLua(invalidType, false) + + // Invalid Type for []net.IP + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", 
expected, actual) + } + + actual = buildResolversForLua(ipList, invalidType) + + // Invalid Type for bool + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + expected = "\"192.0.0.1\", \"[2001:db8:1234::]\"" + actual = buildResolversForLua(ipList, false) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + expected = "\"192.0.0.1\"" + actual = buildResolversForLua(ipList, true) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) } } @@ -322,8 +592,31 @@ func TestBuildResolvers(t *testing.T) { ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000") ipList := []net.IP{ipOne, ipTwo} + invalidType := &ingress.Ingress{} + expected := "" + actual := buildResolvers(invalidType, false) + + // Invalid Type for []net.IP + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + actual = buildResolvers(ipList, invalidType) + + // Invalid Type for bool + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + validResolver := "resolver 192.0.0.1 [2001:db8:1234::] valid=30s;" - resolver := buildResolvers(ipList) + resolver := buildResolvers(ipList, false) + + if resolver != validResolver { + t.Errorf("Expected '%v' but returned '%v'", validResolver, resolver) + } + + validResolver = "resolver 192.0.0.1 valid=30s ipv6=off;" + resolver = buildResolvers(ipList, true) if resolver != validResolver { t.Errorf("Expected '%v' but returned '%v'", validResolver, resolver) @@ -331,6 +624,14 @@ func TestBuildResolvers(t *testing.T) { } func TestBuildNextUpstream(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildNextUpstream(invalidType, "") + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + cases := map[string]struct { NextUpstream string NonIdempotent bool @@ -369,6 +670,14 @@ func TestBuildNextUpstream(t *testing.T) { } func TestBuildRateLimit(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []string{} + actual := buildRateLimit(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + loc := &ingress.Location{} loc.RateLimit.Connections.Name = "con" @@ -400,14 +709,50 @@ func TestBuildRateLimit(t *testing.T) { t.Errorf("Expected '%v' but returned '%v'", validLimits, limits) } } + + // Invalid limit + limits = buildRateLimit(&ingress.Ingress{}) + if !reflect.DeepEqual(expected, limits) { + t.Errorf("Expected '%v' but returned '%v'", expected, limits) + } +} + +// TODO: Needs more tests +func TestBuildRateLimitZones(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []string{} + actual := buildRateLimitZones(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +// TODO: Needs more tests +func TestFilterRateLimits(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []ratelimit.Config{} + actual := filterRateLimits(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } } func TestBuildAuthSignURL(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildAuthSignURL(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + cases := map[string]struct { Input, Output string }{ - 
"default url": {"http://google.com", "http://google.com?rd=$pass_access_scheme://$http_host$request_uri"}, - "with random field": {"http://google.com?cat=0", "http://google.com?cat=0&rd=$pass_access_scheme://$http_host$request_uri"}, + "default url": {"http://google.com", "http://google.com?rd=$pass_access_scheme://$http_host$escaped_request_uri"}, + "with random field": {"http://google.com?cat=0", "http://google.com?cat=0&rd=$pass_access_scheme://$http_host$escaped_request_uri"}, "with rd field": {"http://google.com?cat&rd=$request", "http://google.com?cat&rd=$request"}, } for k, tc := range cases { @@ -417,3 +762,507 @@ func TestBuildAuthSignURL(t *testing.T) { } } } + +func TestIsLocationInLocationList(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := isLocationInLocationList(invalidType, "") + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + testCases := []struct { + location *ingress.Location + rawLocationList string + expected bool + }{ + {&ingress.Location{Path: "/match"}, "/match", true}, + {&ingress.Location{Path: "/match"}, ",/match", true}, + {&ingress.Location{Path: "/match"}, "/dontmatch", false}, + {&ingress.Location{Path: "/match"}, ",/dontmatch", false}, + {&ingress.Location{Path: "/match"}, "/dontmatch,/match", true}, + {&ingress.Location{Path: "/match"}, "/dontmatch,/dontmatcheither", false}, + } + + for _, testCase := range testCases { + result := isLocationInLocationList(testCase.location, testCase.rawLocationList) + if result != testCase.expected { + t.Errorf(" expected %v but return %v, path: '%s', rawLocation: '%s'", testCase.expected, result, testCase.location.Path, testCase.rawLocationList) + } + } +} + +func TestBuildUpstreamName(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildUpstreamName(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + defaultBackend := "upstream-name" + defaultHost := "example.com" + + for k, tc := range tmplFuncTestcases { + loc := &ingress.Location{ + Path: tc.Path, + Rewrite: rewrite.Config{Target: tc.Target}, + Backend: defaultBackend, + XForwardedPrefix: tc.XForwardedPrefix, + } + + if tc.SecureBackend { + loc.BackendProtocol = "HTTPS" + } + + backend := &ingress.Backend{ + Name: defaultBackend, + } + + expected := defaultBackend + + if tc.Sticky { + backend.SessionAffinity = ingress.SessionAffinityConfig{ + AffinityType: "cookie", + CookieSessionAffinity: ingress.CookieSessionAffinity{ + Locations: map[string][]string{ + defaultHost: {tc.Path}, + }, + }, + } + } + + pp := buildUpstreamName(loc) + if !strings.EqualFold(expected, pp) { + t.Errorf("%s: expected \n'%v'\nbut returned \n'%v'", k, expected, pp) + } + } +} + +func TestEscapeLiteralDollar(t *testing.T) { + escapedPath := escapeLiteralDollar("/$") + expected := "/${literal_dollar}" + if escapedPath != expected { + t.Errorf("Expected %v but returned %v", expected, escapedPath) + } + + escapedPath = escapeLiteralDollar("/hello-$/world-$/") + expected = "/hello-${literal_dollar}/world-${literal_dollar}/" + if escapedPath != expected { + t.Errorf("Expected %v but returned %v", expected, escapedPath) + } + + leaveUnchagned := "/leave-me/unchagned" + escapedPath = escapeLiteralDollar(leaveUnchagned) + if escapedPath != leaveUnchagned { + t.Errorf("Expected %v but returned %v", leaveUnchagned, escapedPath) + } + + escapedPath = escapeLiteralDollar(false) + expected = "" + if escapedPath != expected { + 
t.Errorf("Expected %v but returned %v", expected, escapedPath) + } +} + +func TestOpentracingPropagateContext(t *testing.T) { + tests := map[interface{}]string{ + &ingress.Location{BackendProtocol: "HTTP"}: "opentracing_propagate_context", + &ingress.Location{BackendProtocol: "HTTPS"}: "opentracing_propagate_context", + &ingress.Location{BackendProtocol: "GRPC"}: "opentracing_grpc_propagate_context", + &ingress.Location{BackendProtocol: "GRPCS"}: "opentracing_grpc_propagate_context", + &ingress.Location{BackendProtocol: "AJP"}: "opentracing_propagate_context", + "not a location": "opentracing_propagate_context", + } + + for loc, expectedDirective := range tests { + actualDirective := opentracingPropagateContext(loc) + if actualDirective != expectedDirective { + t.Errorf("Expected %v but returned %v", expectedDirective, actualDirective) + } + } +} + +func TestGetIngressInformation(t *testing.T) { + validIngress := &ingress.Ingress{} + invalidIngress := "wrongtype" + host := "host1" + validPath := "/ok" + invalidPath := 10 + + info := getIngressInformation(invalidIngress, host, validPath) + expected := &ingressInformation{} + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } + + info = getIngressInformation(validIngress, host, invalidPath) + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } + + // Setup Ingress Resource + validIngress.Namespace = "default" + validIngress.Name = "validIng" + validIngress.Annotations = map[string]string{ + "ingress.annotation": "ok", + } + validIngress.Spec.Backend = &networking.IngressBackend{ + ServiceName: "a-svc", + } + + info = getIngressInformation(validIngress, host, validPath) + expected = &ingressInformation{ + Namespace: "default", + Rule: "validIng", + Annotations: map[string]string{ + "ingress.annotation": "ok", + }, + Service: "a-svc", + } + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } + + validIngress.Spec.Backend = nil + validIngress.Spec.Rules = []networking.IngressRule{ + { + Host: host, + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/ok", + Backend: networking.IngressBackend{ + ServiceName: "b-svc", + }, + }, + }, + }, + }, + }, + {}, + } + + info = getIngressInformation(validIngress, host, validPath) + expected = &ingressInformation{ + Namespace: "default", + Rule: "validIng", + Annotations: map[string]string{ + "ingress.annotation": "ok", + }, + Service: "b-svc", + } + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } + + validIngress.Spec.Rules = append(validIngress.Spec.Rules, networking.IngressRule{ + Host: "host2", + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/ok", + Backend: networking.IngressBackend{ + ServiceName: "c-svc", + }, + }, + }, + }, + }, + }) + + info = getIngressInformation(validIngress, host, validPath) + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } + + info = getIngressInformation(validIngress, "host2", validPath) + expected.Service = "c-svc" + if !info.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, info) + } +} + +func TestBuildCustomErrorLocationsPerServer(t *testing.T) { + testCases := []struct { + server interface{} + expectedResults []errorLocation + }{ + { // Single ingress + &ingress.Server{Locations: []*ingress.Location{ + 
{ + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{401, 402}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-backend", + Codes: []int{401, 402}, + }, + }, + }, + { // Two ingresses, overlapping error codes, same backend + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{402, 403}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-backend", + Codes: []int{401, 402, 403}, + }, + }, + }, + { // Two ingresses, overlapping error codes, different backends + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{402, 403}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-one", + Codes: []int{401, 402}, + }, + { + UpstreamName: "custom-default-backend-test-two", + Codes: []int{402, 403}, + }, + }, + }, + { // Many ingresses, overlapping error codes, different backends + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{501, 502}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{409, 410}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{504, 505}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-one", + Codes: []int{401, 402, 501, 502}, + }, + { + UpstreamName: "custom-default-backend-test-two", + Codes: []int{409, 410, 504, 505}, + }, + }, + }, + } + + for _, c := range testCases { + response := buildCustomErrorLocationsPerServer(c.server) + if results, ok := response.([]errorLocation); ok { + if !reflect.DeepEqual(c.expectedResults, results) { + t.Errorf("Expected %+v but got %+v", c.expectedResults, results) + } + } else { + t.Error("Unable to convert to []errorLocation") + } + } +} + +func TestProxySetHeader(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "proxy_set_header" + actual := proxySetHeader(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + grpcBackend := &ingress.Location{ + BackendProtocol: "GRPC", + } + + expected = "grpc_set_header" + actual = proxySetHeader(grpcBackend) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestBuildInfluxDB(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildInfluxDB(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfg := influxdb.Config{ + InfluxDBEnabled: true, + InfluxDBServerName: "ok.com", + InfluxDBHost: "host.com", + InfluxDBPort: "5252", + InfluxDBMeasurement: "ok", + } + expected = "influxdb server_name=ok.com host=host.com port=5252 measurement=ok enabled=true;" + actual = buildInfluxDB(cfg) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestBuildOpenTracing(t *testing.T) { + 
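+	// Like the other template helpers, buildOpentracing is expected to fail
+	// soft: a value that is not a config.Configuration yields an empty string.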
invalidType := &ingress.Ingress{} + expected := "" + actual := buildOpentracing(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgJaeger := config.Configuration{ + EnableOpentracing: true, + JaegerCollectorHost: "jaeger-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgJaeger) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgZipkin := config.Configuration{ + EnableOpentracing: true, + ZipkinCollectorHost: "zipkin-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgZipkin) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgDatadog := config.Configuration{ + EnableOpentracing: true, + DatadogCollectorHost: "datadog-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgDatadog) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + +} + +func TestEnforceRegexModifier(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := enforceRegexModifier(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + locs := []*ingress.Location{ + { + Rewrite: rewrite.Config{ + Target: "/alright", + UseRegex: true, + }, + Path: "/ok", + }, + } + expected = true + actual = enforceRegexModifier(locs) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestStripLocationModifer(t *testing.T) { + expected := "ok.com" + actual := stripLocationModifer("~*ok.com") + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestShouldLoadModSecurityModule(t *testing.T) { + // ### Invalid argument type tests ### + // The first tests should return false. + expected := false + + invalidType := &ingress.Ingress{} + actual := shouldLoadModSecurityModule(config.Configuration{}, invalidType) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + actual = shouldLoadModSecurityModule(invalidType, []*ingress.Server{}) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + // ### Functional tests ### + actual = shouldLoadModSecurityModule(config.Configuration{}, []*ingress.Server{}) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + // All further tests should return true. 
+ expected = true + + configuration := config.Configuration{EnableModsecurity: true} + actual = shouldLoadModSecurityModule(configuration, []*ingress.Server{}) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + servers := []*ingress.Server{ + { + Locations: []*ingress.Location{ + { + ModSecurity: modsecurity.Config{ + Enable: true, + }, + }, + }, + }, + } + actual = shouldLoadModSecurityModule(config.Configuration{}, servers) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + actual = shouldLoadModSecurityModule(configuration, servers) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} diff --git a/internal/ingress/controller/util.go b/internal/ingress/controller/util.go index a71bb7400b..bfbb338fe1 100644 --- a/internal/ingress/controller/util.go +++ b/internal/ingress/controller/util.go @@ -17,14 +17,15 @@ limitations under the License. package controller import ( + "fmt" + "os/exec" "syscall" - "github.com/golang/glog" - api "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/util/sysctl" - + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/sysctl" ) // newUpstream creates an upstream without servers. @@ -41,37 +42,71 @@ func newUpstream(name string) *ingress.Backend { } } -// sysctlSomaxconn returns the value of net.core.somaxconn, i.e. -// maximum number of connections that can be queued for acceptance +// upstreamName returns a formatted upstream name based on namespace, service, and port +func upstreamName(namespace string, service string, port intstr.IntOrString) string { + return fmt.Sprintf("%v-%v-%v", namespace, service, port.String()) +} + +// sysctlSomaxconn returns the maximum number of connections that can be queued +// for acceptance (value of net.core.somaxconn) // http://nginx.org/en/docs/http/ngx_http_core_module.html#listen func sysctlSomaxconn() int { maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn") if err != nil || maxConns < 512 { - glog.V(3).Infof("system net.core.somaxconn=%v (using system default)", maxConns) + klog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns) return 511 } return maxConns } -// sysctlFSFileMax returns the value of fs.file-max, i.e. 
-// maximum number of open file descriptors
-func sysctlFSFileMax() int {
+// rlimitMaxNumFiles returns the hard limit for RLIMIT_NOFILE
+func rlimitMaxNumFiles() int {
 	var rLimit syscall.Rlimit
 	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
 	if err != nil {
-		glog.Errorf("unexpected error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
-		// returning 0 means don't render the value
+		klog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
 		return 0
 	}
+	klog.V(2).Infof("rlimit.max=%v", rLimit.Max)
 	return int(rLimit.Max)
 }
 
-func intInSlice(i int, list []int) bool {
-	for _, v := range list {
-		if v == i {
-			return true
-		}
+const (
+	defBinary = "/usr/sbin/nginx"
+	cfgPath   = "/etc/nginx/nginx.conf"
+)
+
+// NginxExecTester defines the interface used to execute
+// commands such as reload or test configuration
+type NginxExecTester interface {
+	ExecCommand(args ...string) *exec.Cmd
+	Test(cfg string) ([]byte, error)
+}
+
+// NginxCommand stores context around a given nginx executable path
+type NginxCommand struct {
+	Binary string
+}
+
+// NewNginxCommand returns a new NginxCommand whose binary path is detected
+// from the environment variable NGINX_BINARY, falling back to the default
+func NewNginxCommand() NginxCommand {
+	return NginxCommand{
+		Binary: defBinary,
 	}
-	return false
+}
+
+// ExecCommand instantiates an exec.Cmd object to call the nginx program
+func (nc NginxCommand) ExecCommand(args ...string) *exec.Cmd {
+	cmdArgs := []string{}
+
+	cmdArgs = append(cmdArgs, "-c", cfgPath)
+	cmdArgs = append(cmdArgs, args...)
+	return exec.Command(nc.Binary, cmdArgs...)
+}
+
+// Test checks whether the given config file is a syntactically valid nginx configuration
+func (nc NginxCommand) Test(cfg string) ([]byte, error) {
+	return exec.Command(nc.Binary, "-c", cfg, "-t").CombinedOutput()
 }
diff --git a/internal/ingress/controller/util_test.go b/internal/ingress/controller/util_test.go
index dc02bf0dc0..d1e5daf280 100644
--- a/internal/ingress/controller/util_test.go
+++ b/internal/ingress/controller/util_test.go
@@ -20,34 +20,8 @@ import (
 	"testing"
 )
 
-type fakeError struct{}
-
-func (fe *fakeError) Error() string {
-	return "fakeError"
-}
-
-func TestIntInSlice(t *testing.T) {
-	fooTests := []struct {
-		i    int
-		list []int
-		er   bool
-	}{
-		{1, []int{1, 2}, true},
-		{3, []int{1, 2}, false},
-		{1, nil, false},
-		{0, nil, false},
-	}
-
-	for _, fooTest := range fooTests {
-		r := intInSlice(fooTest.i, fooTest.list)
-		if r != fooTest.er {
-			t.Errorf("returned %t but expected %t for s=%v & list=%v", r, fooTest.er, fooTest.i, fooTest.list)
-		}
-	}
-}
-
-func TestSysctlFSFileMax(t *testing.T) {
-	i := sysctlFSFileMax()
+func TestRlimitMaxNumFiles(t *testing.T) {
+	i := rlimitMaxNumFiles()
 	if i < 1 {
 		t.Errorf("returned %v but expected > 0", i)
 	}
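To make the intent of the new wrapper concrete, here is a hypothetical caller in the same `controller` package. This is an illustrative sketch, not part of the PR: `reloadIfValid` and its argument are invented here; only `NewNginxCommand`, `ExecCommand`, and `Test` come from the code above.

```go
package controller

import "k8s.io/klog"

// reloadIfValid tests a candidate configuration file and reloads nginx
// only when the syntax check passes.
func reloadIfValid(cfgFile string) bool {
	nc := NewNginxCommand()

	// Test runs "nginx -c <cfgFile> -t" and returns the combined
	// stdout/stderr plus a non-nil error on syntax problems.
	if out, err := nc.Test(cfgFile); err != nil {
		klog.Errorf("invalid nginx configuration: %v\n%s", err, string(out))
		return false
	}

	// ExecCommand prepends "-c /etc/nginx/nginx.conf" before our args.
	if out, err := nc.ExecCommand("-s", "reload").CombinedOutput(); err != nil {
		klog.Errorf("nginx reload failed: %v\n%s", err, string(out))
		return false
	}

	return true
}
```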
This part usually contains a small response header. // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) @@ -69,8 +73,13 @@ type Backend struct { // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream ProxyNextUpstream string `json:"proxy-next-upstream"` - // Parameters for proxy-pass directive (eg. Apache web server). - ProxyPassParams string `json:"proxy-pass-params"` + // Limits the time during which a request can be passed to the next server. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_timeout + ProxyNextUpstreamTimeout int `json:"proxy-next-upstream-timeout"` + + // Limits the number of possible tries for passing a request to the next server. + // https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_tries + ProxyNextUpstreamTries int `json:"proxy-next-upstream-tries"` // Sets the original text that should be changed in the "Location" and "Refresh" header fields of a proxied server response. // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect @@ -79,7 +88,7 @@ type Backend struct { // Sets the replacement text that should be changed in the "Location" and "Refresh" header fields of a proxied server response. // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect - // Default: "" + // Default: off ProxyRedirectTo string `json:"proxy-redirect-to"` // Enables or disables buffering of a client request body. @@ -106,24 +115,23 @@ type Backend struct { // Default: false UsePortInRedirects bool `json:"use-port-in-redirects"` - // Number of unsuccessful attempts to communicate with the server that should happen in the - // duration set by the fail_timeout parameter to consider the server unavailable - // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream - // Default: 0, ie use platform liveness probe - UpstreamMaxFails int `json:"upstream-max-fails"` - - // Time during which the specified number of unsuccessful attempts to communicate with - // the server should happen to consider the server unavailable - // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream - // Default: 0, ie use platform liveness probe - UpstreamFailTimeout int `json:"upstream-fail-timeout"` - // Enable stickiness by client-server mapping based on a NGINX variable, text or a combination of both. // A consistent hashing method will be used which ensures only a few keys would be remapped to different // servers on upstream group changes // http://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash UpstreamHashBy string `json:"upstream-hash-by"` + // Consistent hashing subset flag. + // Default: false + UpstreamHashBySubset bool `json:"upstream-hash-by-subset"` + + // Subset consistent hashing, subset size. + // Default 3 + UpstreamHashBySubsetSize int `json:"upstream-hash-by-subset-size"` + + // Let's us choose a load balancing algorithm per ingress + LoadBalancing string `json:"load-balance"` + // WhitelistSourceRange allows limiting access to certain client addresses // http://nginx.org/en/docs/http/ngx_http_access_module.html WhitelistSourceRange []string `json:"whitelist-source-range,-"` @@ -138,4 +146,8 @@ type Backend struct { // Sets the initial amount after which the further transmission of a response to a client will be rate limited. 
// http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after LimitRateAfter int `json:"limit-rate-after"` + + // Enables or disables buffering of responses from the proxied server. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering + ProxyBuffering string `json:"proxy-buffering"` } diff --git a/internal/ingress/errors/errors.go b/internal/ingress/errors/errors.go index 1a9db9ba44..11ca0f3cf4 100644 --- a/internal/ingress/errors/errors.go +++ b/internal/ingress/errors/errors.go @@ -32,6 +32,14 @@ var ( ErrInvalidAnnotationName = errors.New("invalid annotation name") ) +// NewInvalidAnnotationConfiguration returns a new InvalidConfiguration error for use when +// annotations are not correctly configured +func NewInvalidAnnotationConfiguration(name string, reason string) error { + return InvalidConfiguration{ + Name: fmt.Sprintf("the annotation %v does not contain a valid configuration: %v", name, reason), + } +} + // NewInvalidAnnotationContent returns a new InvalidContent error func NewInvalidAnnotationContent(name string, val interface{}) error { return InvalidContent{ @@ -46,6 +54,15 @@ func NewLocationDenied(reason string) error { } } +// InvalidConfiguration Error +type InvalidConfiguration struct { + Name string +} + +func (e InvalidConfiguration) Error() string { + return e.Name +} + // InvalidContent error type InvalidContent struct { Name string @@ -84,10 +101,13 @@ func IsInvalidContent(e error) bool { return ok } +// New returns a new error func New(m string) error { return errors.New(m) } +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. func Errorf(format string, args ...interface{}) error { - return errors.Errorf(format, args) + return errors.Errorf(format, args...) } diff --git a/internal/ingress/metric/collectors/controller.go b/internal/ingress/metric/collectors/controller.go new file mode 100644 index 0000000000..e64a271ff7 --- /dev/null +++ b/internal/ingress/metric/collectors/controller.go @@ -0,0 +1,293 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/klog" +) + +var ( + operation = []string{"controller_namespace", "controller_class", "controller_pod"} + ingressOperation = []string{"controller_namespace", "controller_class", "controller_pod", "namespace", "ingress"} + sslLabelHost = []string{"namespace", "class", "host"} +) + +// Controller defines base metrics about the ingress controller +type Controller struct { + prometheus.Collector + + configHash prometheus.Gauge + configSuccess prometheus.Gauge + configSuccessTime prometheus.Gauge + + reloadOperation *prometheus.CounterVec + reloadOperationErrors *prometheus.CounterVec + checkIngressOperation *prometheus.CounterVec + checkIngressOperationErrors *prometheus.CounterVec + sslExpireTime *prometheus.GaugeVec + + constLabels prometheus.Labels + labels prometheus.Labels + + leaderElection *prometheus.GaugeVec +} + +// NewController creates a new prometheus collector for the +// Ingress controller operations +func NewController(pod, namespace, class string) *Controller { + constLabels := prometheus.Labels{ + "controller_namespace": namespace, + "controller_class": class, + "controller_pod": pod, + } + + cm := &Controller{ + constLabels: constLabels, + + labels: prometheus.Labels{ + "namespace": namespace, + "class": class, + }, + + configHash: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Name: "config_hash", + Help: "Running configuration hash actually running", + ConstLabels: constLabels, + }, + ), + configSuccess: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Name: "config_last_reload_successful", + Help: "Whether the last configuration reload attempt was successful", + ConstLabels: constLabels, + }), + configSuccessTime: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Name: "config_last_reload_successful_timestamp_seconds", + Help: "Timestamp of the last successful configuration reload.", + ConstLabels: constLabels, + }), + reloadOperation: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: PrometheusNamespace, + Name: "success", + Help: `Cumulative number of Ingress controller reload operations`, + }, + operation, + ), + reloadOperationErrors: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: PrometheusNamespace, + Name: "errors", + Help: `Cumulative number of Ingress controller errors during reload operations`, + }, + operation, + ), + checkIngressOperationErrors: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: PrometheusNamespace, + Name: "check_errors", + Help: `Cumulative number of Ingress controller errors during syntax check operations`, + }, + ingressOperation, + ), + checkIngressOperation: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: PrometheusNamespace, + Name: "check_success", + Help: `Cumulative number of Ingress controller syntax check operations`, + }, + ingressOperation, + ), + sslExpireTime: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Name: "ssl_expire_time_seconds", + Help: `Number of seconds since 1970 to the SSL Certificate expire. 
An example to check if this certificate will expire in 10 days is: "nginx_ingress_controller_ssl_expire_time_seconds < (time() + (10 * 24 * 3600))"`,
+			},
+			sslLabelHost,
+		),
+		leaderElection: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace:   PrometheusNamespace,
+				Name:        "leader_election_status",
+				Help:        "Gauge reporting status of the leader election, 0 indicates follower, 1 indicates leader. 'name' is the string used to identify the lease",
+				ConstLabels: constLabels,
+			},
+			[]string{"name"},
+		),
+	}
+
+	return cm
+}
+
+// IncReloadCount increments the reload counter
+func (cm *Controller) IncReloadCount() {
+	cm.reloadOperation.With(cm.constLabels).Inc()
+}
+
+// IncReloadErrorCount increments the reload error counter
+func (cm *Controller) IncReloadErrorCount() {
+	cm.reloadOperationErrors.With(cm.constLabels).Inc()
+}
+
+// OnStartedLeading indicates the pod was elected as the leader
+func (cm *Controller) OnStartedLeading(electionID string) {
+	cm.leaderElection.WithLabelValues(electionID).Set(1.0)
+}
+
+// OnStoppedLeading indicates the pod stopped being the leader
+func (cm *Controller) OnStoppedLeading(electionID string) {
+	cm.leaderElection.WithLabelValues(electionID).Set(0)
+}
+
+// IncCheckCount increments the check counter
+func (cm *Controller) IncCheckCount(namespace, name string) {
+	labels := prometheus.Labels{
+		"namespace": namespace,
+		"ingress":   name,
+	}
+	cm.checkIngressOperation.MustCurryWith(cm.constLabels).With(labels).Inc()
+}
+
+// IncCheckErrorCount increments the check error counter
+func (cm *Controller) IncCheckErrorCount(namespace, name string) {
+	labels := prometheus.Labels{
+		"namespace": namespace,
+		"ingress":   name,
+	}
+	cm.checkIngressOperationErrors.MustCurryWith(cm.constLabels).With(labels).Inc()
+}
+
+// ConfigSuccess sets a boolean flag according to the output of the controller configuration reload
+func (cm *Controller) ConfigSuccess(hash uint64, success bool) {
+	if success {
+		cm.configSuccessTime.Set(float64(time.Now().Unix()))
+		cm.configSuccess.Set(1)
+
+		cm.configHash.Set(float64(hash))
+
+		return
+	}
+
+	cm.configSuccess.Set(0)
+	cm.configHash.Set(0)
+}
+
+// Describe implements prometheus.Collector
+func (cm Controller) Describe(ch chan<- *prometheus.Desc) {
+	cm.configHash.Describe(ch)
+	cm.configSuccess.Describe(ch)
+	cm.configSuccessTime.Describe(ch)
+	cm.reloadOperation.Describe(ch)
+	cm.reloadOperationErrors.Describe(ch)
+	cm.checkIngressOperation.Describe(ch)
+	cm.checkIngressOperationErrors.Describe(ch)
+	cm.sslExpireTime.Describe(ch)
+	cm.leaderElection.Describe(ch)
+}
+
+// Collect implements the prometheus.Collector interface.
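+// The controller registers this collector once and the registry then drives
+// Describe and Collect; a typical wiring, mirroring the tests in this package
+// (variable names here are illustrative only):
+//
+//	cm := NewController("pod", "default", "nginx")
+//	reg := prometheus.NewPedanticRegistry()
+//	if err := reg.Register(cm); err != nil {
+//		klog.Fatalf("registering collector failed: %v", err)
+//	}
+//	cm.IncReloadCount()
+//	cm.ConfigSuccess(0, true)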
+func (cm Controller) Collect(ch chan<- prometheus.Metric) { + cm.configHash.Collect(ch) + cm.configSuccess.Collect(ch) + cm.configSuccessTime.Collect(ch) + cm.reloadOperation.Collect(ch) + cm.reloadOperationErrors.Collect(ch) + cm.checkIngressOperation.Collect(ch) + cm.checkIngressOperationErrors.Collect(ch) + cm.sslExpireTime.Collect(ch) + cm.leaderElection.Collect(ch) +} + +// SetSSLExpireTime sets the expiration time of SSL Certificates +func (cm *Controller) SetSSLExpireTime(servers []*ingress.Server) { + for _, s := range servers { + if s.Hostname != "" && s.SSLCert.ExpireTime.Unix() > 0 { + labels := make(prometheus.Labels, len(cm.labels)+1) + for k, v := range cm.labels { + labels[k] = v + } + labels["host"] = s.Hostname + + cm.sslExpireTime.With(labels).Set(float64(s.SSLCert.ExpireTime.Unix())) + } + } +} + +// RemoveMetrics removes metrics for hostnames not available anymore +func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer) { + cm.removeSSLExpireMetrics(true, hosts, registry) +} + +// RemoveAllSSLExpireMetrics removes metrics for expiration of SSL Certificates +func (cm *Controller) RemoveAllSSLExpireMetrics(registry prometheus.Gatherer) { + cm.removeSSLExpireMetrics(false, []string{}, registry) +} + +func (cm *Controller) removeSSLExpireMetrics(onlyDefinedHosts bool, hosts []string, registry prometheus.Gatherer) { + mfs, err := registry.Gather() + if err != nil { + klog.Errorf("Error gathering metrics: %v", err) + return + } + + toRemove := sets.NewString(hosts...) + + for _, mf := range mfs { + metricName := mf.GetName() + if fmt.Sprintf("%v_ssl_expire_time_seconds", PrometheusNamespace) != metricName { + continue + } + + for _, m := range mf.GetMetric() { + labels := make(map[string]string, len(m.GetLabel())) + for _, labelPair := range m.GetLabel() { + labels[*labelPair.Name] = *labelPair.Value + } + + // remove labels that are constant + deleteConstants(labels) + + host, ok := labels["host"] + if !ok { + continue + } + + if onlyDefinedHosts && !toRemove.Has(host) { + continue + } + + klog.V(2).Infof("Removing prometheus metric from gauge %v for host %v", metricName, host) + removed := cm.sslExpireTime.Delete(labels) + if !removed { + klog.V(2).Infof("metric %v for host %v with labels not removed: %v", metricName, host, labels) + } + } + } +} diff --git a/internal/ingress/metric/collectors/controller_test.go b/internal/ingress/metric/collectors/controller_test.go new file mode 100644 index 0000000000..f3c32bfb39 --- /dev/null +++ b/internal/ingress/metric/collectors/controller_test.go @@ -0,0 +1,158 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package collectors
+
+import (
+	"testing"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"k8s.io/ingress-nginx/internal/ingress"
+)
+
+func TestControllerCounters(t *testing.T) {
+	const metadata = `
+	# HELP nginx_ingress_controller_config_last_reload_successful Whether the last configuration reload attempt was successful
+	# TYPE nginx_ingress_controller_config_last_reload_successful gauge
+	# HELP nginx_ingress_controller_success Cumulative number of Ingress controller reload operations
+	# TYPE nginx_ingress_controller_success counter
+	`
+	cases := []struct {
+		name    string
+		test    func(*Controller)
+		metrics []string
+		want    string
+	}{
+		{
+			name: "should not increment metrics if no operations are invoked",
+			test: func(cm *Controller) {
+			},
+			want: metadata + `
+				nginx_ingress_controller_config_last_reload_successful{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 0
+			`,
+			metrics: []string{"nginx_ingress_controller_config_last_reload_successful", "nginx_ingress_controller_success"},
+		},
+		{
+			name: "single increase in reload count should return 1",
+			test: func(cm *Controller) {
+				cm.IncReloadCount()
+				cm.ConfigSuccess(0, true)
+			},
+			want: metadata + `
+				nginx_ingress_controller_config_last_reload_successful{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
+				nginx_ingress_controller_success{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
+			`,
+			metrics: []string{"nginx_ingress_controller_config_last_reload_successful", "nginx_ingress_controller_success"},
+		},
+		{
+			name: "single increase in error reload count should return 1",
+			test: func(cm *Controller) {
+				cm.IncReloadErrorCount()
+			},
+			want: `
+				# HELP nginx_ingress_controller_errors Cumulative number of Ingress controller errors during reload operations
+				# TYPE nginx_ingress_controller_errors counter
+				nginx_ingress_controller_errors{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
+			`,
+			metrics: []string{"nginx_ingress_controller_errors"},
+		},
+		{
+			name: "should set SSL certificates metrics",
+			test: func(cm *Controller) {
+				t1, _ := time.Parse(
+					time.RFC3339,
+					"2012-11-01T22:08:41+00:00")
+
+				servers := []*ingress.Server{
+					{
+						Hostname: "demo",
+						SSLCert: ingress.SSLCert{
+							ExpireTime: t1,
+						},
+					},
+					{
+						Hostname: "invalid",
+						SSLCert: ingress.SSLCert{
+							ExpireTime: time.Unix(0, 0),
+						},
+					},
+				}
+				cm.SetSSLExpireTime(servers)
+			},
+			want: `
+				# HELP nginx_ingress_controller_ssl_expire_time_seconds Number of seconds since 1970 to the SSL Certificate expire.\n An example to check if this certificate will expire in 10 days is: "nginx_ingress_controller_ssl_expire_time_seconds < (time() + (10 * 24 * 3600))"
+				# TYPE nginx_ingress_controller_ssl_expire_time_seconds gauge
+				nginx_ingress_controller_ssl_expire_time_seconds{class="nginx",host="demo",namespace="default"} 1.351807721e+09
+			`,
+			metrics: []string{"nginx_ingress_controller_ssl_expire_time_seconds"},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			cm := NewController("pod", "default", "nginx")
+			reg := prometheus.NewPedanticRegistry()
+			if err := reg.Register(cm); err != nil {
+				t.Errorf("registering collector failed: %s", err)
+			}
+
+			c.test(cm)
+
+			if err := GatherAndCompare(cm, c.want, c.metrics, reg); err != nil {
+				t.Errorf("unexpected collecting result:\n%s", err)
+			}
+
+			reg.Unregister(cm)
+		})
+	}
+}
+
+func TestRemoveMetrics(t *testing.T) {
+	cm := 
NewController("pod", "default", "nginx") + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(cm); err != nil { + t.Errorf("registering collector failed: %s", err) + } + + t1, _ := time.Parse( + time.RFC3339, + "2012-11-01T22:08:41+00:00") + + servers := []*ingress.Server{ + { + Hostname: "demo", + SSLCert: ingress.SSLCert{ + ExpireTime: t1, + }, + }, + { + Hostname: "invalid", + SSLCert: ingress.SSLCert{ + ExpireTime: time.Unix(0, 0), + }, + }, + } + cm.SetSSLExpireTime(servers) + + cm.RemoveMetrics([]string{"demo"}, reg) + + if err := GatherAndCompare(cm, "", []string{"nginx_ingress_controller_ssl_expire_time_seconds"}, reg); err != nil { + t.Errorf("unexpected collecting result:\n%s", err) + } + + reg.Unregister(cm) +} diff --git a/internal/ingress/metric/collectors/main.go b/internal/ingress/metric/collectors/main.go new file mode 100644 index 0000000000..2c57ad774d --- /dev/null +++ b/internal/ingress/metric/collectors/main.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +// PrometheusNamespace default metric namespace +var PrometheusNamespace = "nginx_ingress_controller" diff --git a/internal/ingress/metric/collectors/nginx_status.go b/internal/ingress/metric/collectors/nginx_status.go new file mode 100644 index 0000000000..97c731d4fa --- /dev/null +++ b/internal/ingress/metric/collectors/nginx_status.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "log" + "regexp" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/ingress-nginx/internal/nginx" + "k8s.io/klog" +) + +var ( + ac = regexp.MustCompile(`Active connections: (\d+)`) + sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) + reading = regexp.MustCompile(`Reading: (\d+)`) + writing = regexp.MustCompile(`Writing: (\d+)`) + waiting = regexp.MustCompile(`Waiting: (\d+)`) +) + +type ( + nginxStatusCollector struct { + scrapeChan chan scrapeRequest + + data *nginxStatusData + } + + nginxStatusData struct { + connectionsTotal *prometheus.Desc + requestsTotal *prometheus.Desc + connections *prometheus.Desc + } + + basicStatus struct { + // Active total number of active connections + Active int + // Accepted total number of accepted client connections + Accepted int + // Handled total number of handled connections. 
Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).
+		Handled int
+		// Requests total number of client requests.
+		Requests int
+		// Reading current number of connections where nginx is reading the request header.
+		Reading int
+		// Writing current number of connections where nginx is writing the response back to the client.
+		Writing int
+		// Waiting current number of idle client connections waiting for a request.
+		Waiting int
+	}
+)
+
+// NGINXStatusCollector defines a status collector interface
+type NGINXStatusCollector interface {
+	prometheus.Collector
+
+	Start()
+	Stop()
+}
+
+// NewNGINXStatus returns a new prometheus collector for the default nginx status module
+func NewNGINXStatus(podName, namespace, ingressClass string) (NGINXStatusCollector, error) {
+	p := nginxStatusCollector{
+		scrapeChan: make(chan scrapeRequest),
+	}
+
+	constLabels := prometheus.Labels{
+		"controller_namespace": namespace,
+		"controller_class":     ingressClass,
+		"controller_pod":       podName,
+	}
+
+	p.data = &nginxStatusData{
+		connectionsTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(PrometheusNamespace, subSystem, "connections_total"),
+			"total number of connections with state {accepted, handled}",
+			[]string{"state"}, constLabels),
+
+		requestsTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(PrometheusNamespace, subSystem, "requests_total"),
+			"total number of client requests",
+			nil, constLabels),
+
+		connections: prometheus.NewDesc(
+			prometheus.BuildFQName(PrometheusNamespace, subSystem, "connections"),
+			"current number of client connections with state {active, reading, writing, waiting}",
+			[]string{"state"}, constLabels),
+	}
+
+	return p, nil
+}
+
+// Describe implements prometheus.Collector.
+func (p nginxStatusCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- p.data.connectionsTotal
+	ch <- p.data.requestsTotal
+	ch <- p.data.connections
+}
+
+// Collect implements prometheus.Collector.
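+// It does not read the status page itself: it enqueues a request on scrapeChan
+// and waits for the goroutine running Start to perform the scrape, so all
+// scrapes are serialized. A sketch of the intended wiring (illustrative only):
+//
+//	c, _ := NewNGINXStatus("pod", "default", "nginx")
+//	go c.Start()               // serves scrape requests
+//	prometheus.MustRegister(c) // the registry calls Collect on each scrape
+//	defer c.Stop()             // closes scrapeChan, ending the Start loop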
+func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) {
+	req := scrapeRequest{results: ch, done: make(chan struct{})}
+	p.scrapeChan <- req
+	<-req.done
+}
+
+func (p nginxStatusCollector) Start() {
+	for req := range p.scrapeChan {
+		ch := req.results
+		p.scrape(ch)
+		req.done <- struct{}{}
+	}
+}
+
+func (p nginxStatusCollector) Stop() {
+	close(p.scrapeChan)
+}
+
+// toInt parses data[pos] as an int, returning 0 when the position is out of
+// range or the value is not a number
+func toInt(data []string, pos int) int {
+	if len(data) == 0 {
+		return 0
+	}
+	if pos >= len(data) {
+		return 0
+	}
+	if v, err := strconv.Atoi(data[pos]); err == nil {
+		return v
+	}
+	return 0
+}
+
+func parse(data string) *basicStatus {
+	acr := ac.FindStringSubmatch(data)
+	sahrr := sahr.FindStringSubmatch(data)
+	readingr := reading.FindStringSubmatch(data)
+	writingr := writing.FindStringSubmatch(data)
+	waitingr := waiting.FindStringSubmatch(data)
+
+	return &basicStatus{
+		toInt(acr, 1),
+		toInt(sahrr, 1),
+		toInt(sahrr, 2),
+		toInt(sahrr, 3),
+		toInt(readingr, 1),
+		toInt(writingr, 1),
+		toInt(waitingr, 1),
+	}
+}
+
+// scrape fetches the nginx status page and emits the parsed counters
+func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
+	klog.V(3).Infof("start scraping socket: %v", nginx.StatusPath)
+	status, data, err := nginx.NewGetStatusRequest(nginx.StatusPath)
+	if err != nil {
+		log.Printf("%v", err)
+		klog.Warningf("unexpected error obtaining nginx status info: %v", err)
+		return
+	}
+
+	if status < 200 || status >= 400 {
+		klog.Warningf("unexpected error obtaining nginx status info (status %v)", status)
+		return
+	}
+
+	s := parse(string(data))
+
+	ch <- prometheus.MustNewConstMetric(p.data.connectionsTotal,
+		prometheus.CounterValue, float64(s.Accepted), "accepted")
+	ch <- prometheus.MustNewConstMetric(p.data.connectionsTotal,
+		prometheus.CounterValue, float64(s.Handled), "handled")
+	ch <- prometheus.MustNewConstMetric(p.data.requestsTotal,
+		prometheus.CounterValue, float64(s.Requests))
+	ch <- prometheus.MustNewConstMetric(p.data.connections,
+		prometheus.GaugeValue, float64(s.Active), "active")
+	ch <- prometheus.MustNewConstMetric(p.data.connections,
+		prometheus.GaugeValue, float64(s.Reading), "reading")
+	ch <- prometheus.MustNewConstMetric(p.data.connections,
+		prometheus.GaugeValue, float64(s.Writing), "writing")
+	ch <- prometheus.MustNewConstMetric(p.data.connections,
+		prometheus.GaugeValue, float64(s.Waiting), "waiting")
+}
diff --git a/internal/ingress/metric/collectors/nginx_status_test.go b/internal/ingress/metric/collectors/nginx_status_test.go
new file mode 100644
index 0000000000..f3c4ff9e93
--- /dev/null
+++ b/internal/ingress/metric/collectors/nginx_status_test.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package collectors + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/ingress-nginx/internal/nginx" +) + +func TestStatusCollector(t *testing.T) { + cases := []struct { + name string + mock string + metrics []string + want string + }{ + { + name: "should return empty metrics", + mock: ` + `, + want: ` + # HELP nginx_ingress_controller_nginx_process_connections_total total number of connections with state {accepted, handled} + # TYPE nginx_ingress_controller_nginx_process_connections_total counter + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="accepted"} 0 + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="handled"} 0 + `, + metrics: []string{"nginx_ingress_controller_nginx_process_connections_total"}, + }, + { + name: "should return metrics for total connections", + mock: ` + Active connections: 15 + server accepts handled requests + 1 2 3 + Reading: 4 Writing: 5 Waiting: 6 + `, + want: ` + # HELP nginx_ingress_controller_nginx_process_connections_total total number of connections with state {accepted, handled} + # TYPE nginx_ingress_controller_nginx_process_connections_total counter + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="accepted"} 1 + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="handled"} 2 + `, + metrics: []string{"nginx_ingress_controller_nginx_process_connections_total"}, + }, + { + name: "should return nginx metrics all available metrics", + mock: ` + Active connections: 15 + server accepts handled requests + 1 2 3 + Reading: 4 Writing: 5 Waiting: 6 + `, + want: ` + # HELP nginx_ingress_controller_nginx_process_connections current number of client connections with state {active, reading, writing, waiting} + # TYPE nginx_ingress_controller_nginx_process_connections gauge + nginx_ingress_controller_nginx_process_connections{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="active"} 15 + nginx_ingress_controller_nginx_process_connections{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="reading"} 4 + nginx_ingress_controller_nginx_process_connections{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="waiting"} 6 + nginx_ingress_controller_nginx_process_connections{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="writing"} 5 + # HELP nginx_ingress_controller_nginx_process_connections_total total number of connections with state {accepted, handled} + # TYPE nginx_ingress_controller_nginx_process_connections_total counter + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="accepted"} 1 + nginx_ingress_controller_nginx_process_connections_total{controller_class="nginx",controller_namespace="default",controller_pod="pod",state="handled"} 2 + # HELP nginx_ingress_controller_nginx_process_requests_total total number of client requests + # TYPE nginx_ingress_controller_nginx_process_requests_total counter + 
nginx_ingress_controller_nginx_process_requests_total{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 3
+			`,
+			metrics: []string{
+				"nginx_ingress_controller_nginx_process_connections_total",
+				"nginx_ingress_controller_nginx_process_requests_total",
+				"nginx_ingress_controller_nginx_process_connections",
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			listener, err := net.Listen("unix", nginx.StatusSocket)
+			if err != nil {
+				t.Fatalf("creating unix listener: %s", err)
+			}
+
+			server := &httptest.Server{
+				Listener: listener,
+				Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusOK)
+
+					if r.URL.Path == "/nginx_status" {
+						_, err := fmt.Fprint(w, c.mock)
+						if err != nil {
+							t.Error(err)
+						}
+
+						return
+					}
+
+					fmt.Fprint(w, "OK")
+				})},
+			}
+			server.Start()
+
+			time.Sleep(1 * time.Second)
+
+			cm, err := NewNGINXStatus("pod", "default", "nginx")
+			if err != nil {
+				t.Errorf("unexpected error creating nginx status collector: %v", err)
+			}
+
+			go cm.Start()
+
+			reg := prometheus.NewPedanticRegistry()
+			if err := reg.Register(cm); err != nil {
+				t.Errorf("registering collector failed: %s", err)
+			}
+
+			if err := GatherAndCompare(cm, c.want, c.metrics, reg); err != nil {
+				t.Errorf("unexpected collecting result:\n%s", err)
+			}
+
+			reg.Unregister(cm)
+
+			server.Close()
+			cm.Stop()
+
+			listener.Close()
+			os.Remove(nginx.StatusSocket)
+		})
+	}
+}
diff --git a/internal/ingress/metric/collectors/process.go b/internal/ingress/metric/collectors/process.go
new file mode 100644
index 0000000000..dcb78597a4
--- /dev/null
+++ b/internal/ingress/metric/collectors/process.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collectors
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"k8s.io/klog"
+
+	common "github.com/ncabatoff/process-exporter"
+	"github.com/ncabatoff/process-exporter/proc"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type scrapeRequest struct {
+	results chan<- prometheus.Metric
+	done    chan struct{}
+}
+
+// Stopable defines a prometheus collector that can be stopped
+type Stopable interface {
+	prometheus.Collector
+	Stop()
+}
+
+// BinaryNameMatcher defines a namer using the binary name
+type BinaryNameMatcher struct {
+	Name   string
+	Binary string
+}
+
+// MatchAndName returns false if the match failed, otherwise
+// true and the resulting name (always empty for this matcher).
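+// Only the basename of Binary is compared, and only against Name itself, so a
+// matcher whose two fields agree accepts every process that has a non-empty
+// command line. Illustrative example (values assumed, not taken from config):
+//
+//	m := BinaryNameMatcher{Name: "nginx", Binary: "/usr/sbin/nginx"}
+//	ok, _ := m.MatchAndName(common.ProcAttributes{Cmdline: []string{"nginx"}})
+//	// ok == true; the returned group name is always the empty string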
+func (em BinaryNameMatcher) MatchAndName(nacl common.ProcAttributes) (bool, string) { + if len(nacl.Cmdline) == 0 { + return false, "" + } + cmd := filepath.Base(em.Binary) + return em.Name == cmd, "" +} + +// String returns the name of the binary to match +func (em BinaryNameMatcher) String() string { + return fmt.Sprintf("%+v", em.Binary) +} + +type namedProcessData struct { + numProcs *prometheus.Desc + cpuSecs *prometheus.Desc + readBytes *prometheus.Desc + writeBytes *prometheus.Desc + memResidentbytes *prometheus.Desc + memVirtualbytes *prometheus.Desc + startTime *prometheus.Desc +} + +type namedProcess struct { + *proc.Grouper + + scrapeChan chan scrapeRequest + fs *proc.FS + data namedProcessData +} + +const subSystem = "nginx_process" + +// NGINXProcessCollector defines a process collector interface +type NGINXProcessCollector interface { + prometheus.Collector + + Start() + Stop() +} + +var name = "nginx" +var binary = "/usr/bin/nginx" + +// NewNGINXProcess returns a new prometheus collector for the nginx process +func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector, error) { + fs, err := proc.NewFS("/proc", false) + if err != nil { + return nil, err + } + + nm := BinaryNameMatcher{ + Name: name, + Binary: binary, + } + + p := namedProcess{ + scrapeChan: make(chan scrapeRequest), + Grouper: proc.NewGrouper(nm, true, false, false), + fs: fs, + } + + _, _, err = p.Update(p.fs.AllProcs()) + if err != nil { + return nil, err + } + + constLabels := prometheus.Labels{ + "controller_namespace": namespace, + "controller_class": ingressClass, + "controller_pod": pod, + } + + p.data = namedProcessData{ + numProcs: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "num_procs"), + "number of processes", + nil, constLabels), + + cpuSecs: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "cpu_seconds_total"), + "Cpu usage in seconds", + nil, constLabels), + + readBytes: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "read_bytes_total"), + "number of bytes read", + nil, constLabels), + + writeBytes: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "write_bytes_total"), + "number of bytes written", + nil, constLabels), + + memResidentbytes: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "resident_memory_bytes"), + "number of bytes of memory in use", + nil, constLabels), + + memVirtualbytes: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "virtual_memory_bytes"), + "number of bytes of memory in use", + nil, constLabels), + + startTime: prometheus.NewDesc( + prometheus.BuildFQName(PrometheusNamespace, subSystem, "oldest_start_time_seconds"), + "start time in seconds since 1970/01/01", + nil, constLabels), + } + + return p, nil +} + +// Describe implements prometheus.Collector. +func (p namedProcess) Describe(ch chan<- *prometheus.Desc) { + ch <- p.data.cpuSecs + ch <- p.data.numProcs + ch <- p.data.readBytes + ch <- p.data.writeBytes + ch <- p.data.memResidentbytes + ch <- p.data.memVirtualbytes + ch <- p.data.startTime +} + +// Collect implements prometheus.Collector. 
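+// As with the status collector, the request is handed over scrapeChan to the
+// goroutine running Start, so /proc is only ever walked from one place. A
+// sketch of the assumed lifecycle:
+//
+//	p, err := NewNGINXProcess("pod", "default", "nginx")
+//	if err == nil {
+//		go p.Start()
+//		prometheus.MustRegister(p)
+//	}
+//	// later: p.Stop() closes scrapeChan and ends the Start loop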
+func (p namedProcess) Collect(ch chan<- prometheus.Metric) { + req := scrapeRequest{results: ch, done: make(chan struct{})} + p.scrapeChan <- req + <-req.done +} + +func (p namedProcess) Start() { + for req := range p.scrapeChan { + ch := req.results + p.scrape(ch) + req.done <- struct{}{} + } +} + +func (p namedProcess) Stop() { + close(p.scrapeChan) +} + +func (p namedProcess) scrape(ch chan<- prometheus.Metric) { + _, groups, err := p.Update(p.fs.AllProcs()) + if err != nil { + klog.Warningf("unexpected error obtaining nginx process info: %v", err) + return + } + + for _, gcounts := range groups { + ch <- prometheus.MustNewConstMetric(p.data.numProcs, + prometheus.GaugeValue, float64(gcounts.Procs)) + ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes, + prometheus.GaugeValue, float64(gcounts.Memory.ResidentBytes)) + ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes, + prometheus.GaugeValue, float64(gcounts.Memory.VirtualBytes)) + ch <- prometheus.MustNewConstMetric(p.data.startTime, + prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) + ch <- prometheus.MustNewConstMetric(p.data.cpuSecs, + prometheus.CounterValue, gcounts.CPUSystemTime) + ch <- prometheus.MustNewConstMetric(p.data.readBytes, + prometheus.CounterValue, float64(gcounts.ReadBytes)) + ch <- prometheus.MustNewConstMetric(p.data.writeBytes, + prometheus.CounterValue, float64(gcounts.WriteBytes)) + } +} diff --git a/internal/ingress/metric/collectors/process_test.go b/internal/ingress/metric/collectors/process_test.go new file mode 100644 index 0000000000..45170572bd --- /dev/null +++ b/internal/ingress/metric/collectors/process_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package collectors
+
+import (
+	"os/exec"
+	"syscall"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+func TestProcessCollector(t *testing.T) {
+	cases := []struct {
+		name    string
+		metrics []string
+	}{
+		{
+			name:    "should return metrics",
+			metrics: []string{"nginx_ingress_controller_nginx_process_num_procs"},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			name = "sleep"
+			binary = "/bin/sleep"
+
+			cmd := exec.Command(binary, "1000000")
+			err := cmd.Start()
+			if err != nil {
+				t.Errorf("unexpected error creating dummy process: %v", err)
+			}
+
+			done := make(chan struct{})
+			go func() {
+				cmd.Wait()
+				status := cmd.ProcessState.Sys().(syscall.WaitStatus)
+				if status.Signaled() {
+					t.Logf("Signal: %v", status.Signal())
+				} else {
+					t.Logf("Status: %v", status.ExitStatus())
+				}
+				done <- struct{}{}
+			}()
+
+			cm, err := NewNGINXProcess("pod", "default", "nginx")
+			if err != nil {
+				t.Fatalf("unexpected error creating nginx process collector: %v", err)
+			}
+
+			go cm.Start()
+
+			defer func() {
+				cm.Stop()
+
+				cmd.Process.Kill()
+				<-done
+				close(done)
+			}()
+
+			reg := prometheus.NewPedanticRegistry()
+			if err := reg.Register(cm); err != nil {
+				t.Errorf("registering collector failed: %s", err)
+			}
+
+			metrics, err := reg.Gather()
+			if err != nil {
+				t.Errorf("gathering metrics failed: %s", err)
+			}
+
+			m := filterMetrics(metrics, c.metrics)
+
+			if *m[0].GetMetric()[0].Gauge.Value <= 0 {
+				t.Errorf("number of processes should be > 0")
+			}
+
+			reg.Unregister(cm)
+		})
+	}
+}
diff --git a/internal/ingress/metric/collectors/socket.go b/internal/ingress/metric/collectors/socket.go
new file mode 100644
index 0000000000..7ca8aff39b
--- /dev/null
+++ b/internal/ingress/metric/collectors/socket.go
@@ -0,0 +1,449 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collectors
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/prometheus/client_golang/prometheus"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog"
+)
+
+type upstream struct {
+	Latency        float64 `json:"upstreamLatency"`
+	ResponseLength float64 `json:"upstreamResponseLength"`
+	ResponseTime   float64 `json:"upstreamResponseTime"`
+	//Status string `json:"upstreamStatus"`
+}
+
+type socketData struct {
+	Host   string `json:"host"`
+	Status string `json:"status"`
+
+	ResponseLength float64 `json:"responseLength"`
+
+	Method string `json:"method"`
+
+	RequestLength float64 `json:"requestLength"`
+	RequestTime   float64 `json:"requestTime"`
+
+	upstream
+
+	Namespace string `json:"namespace"`
+	Ingress   string `json:"ingress"`
+	Service   string `json:"service"`
+	Path      string `json:"path"`
+}
+
+// SocketCollector stores prometheus metrics and ingress meta-data
+type SocketCollector struct {
+	prometheus.Collector
+
+	requestTime   *prometheus.HistogramVec
+	requestLength *prometheus.HistogramVec
+
+	responseTime   *prometheus.HistogramVec
+	responseLength *prometheus.HistogramVec
+
+	upstreamLatency *prometheus.SummaryVec
+
+	bytesSent *prometheus.HistogramVec
+
+	requests *prometheus.CounterVec
+
+	listener net.Listener
+
+	metricMapping map[string]interface{}
+
+	hosts sets.String
+
+	metricsPerHost bool
+}
+
+var (
+	requestTags = []string{
+		"status",
+
+		"method",
+		"path",
+
+		"namespace",
+		"ingress",
+		"service",
+	}
+)
+
+// NewSocketCollector creates a new SocketCollector instance using
+// the ingress watch namespace and class used by the controller
+func NewSocketCollector(pod, namespace, class string, metricsPerHost bool) (*SocketCollector, error) {
+	socket := "/tmp/prometheus-nginx.socket"
+	listener, err := net.Listen("unix", socket)
+	if err != nil {
+		return nil, err
+	}
+
+	err = os.Chmod(socket, 0777)
+	if err != nil {
+		return nil, err
+	}
+
+	constLabels := prometheus.Labels{
+		"controller_namespace": namespace,
+		"controller_class":     class,
+		"controller_pod":       pod,
+	}
+
+	// shadow the package-level slice so that appending the optional "host"
+	// label cannot modify it
+	requestTags := requestTags
+	if metricsPerHost {
+		requestTags = append(requestTags, "host")
+	}
+
+	sc := &SocketCollector{
+		listener: listener,
+
+		metricsPerHost: metricsPerHost,
+
+		responseTime: prometheus.NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:        "response_duration_seconds",
+				Help:        "The time spent on receiving the response from the upstream server",
+				Namespace:   PrometheusNamespace,
+				ConstLabels: constLabels,
+			},
+			requestTags,
+		),
+		responseLength: prometheus.NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:        "response_size",
+				Help:        "The response length (including header and body)",
+				Namespace:   PrometheusNamespace,
+				ConstLabels: constLabels,
+			},
+			requestTags,
+		),
+
+		requestTime: prometheus.NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:        "request_duration_seconds",
+				Help:        "The request processing time in seconds",
+				Namespace:   PrometheusNamespace,
+				ConstLabels: constLabels,
+			},
+			requestTags,
+		),
+		requestLength: prometheus.NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:      "request_size",
+				Help:      "The request length (including request line, header, and request body)",
+				Namespace: PrometheusNamespace,
+				Buckets:   prometheus.LinearBuckets(10, 10, 10), // 10 buckets, each 10 bytes wide.
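+				// i.e. upper bounds 10, 20, ..., 100 bytes; anything larger is
+				// counted in the implicit +Inf bucket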
+ ConstLabels: constLabels, + }, + requestTags, + ), + + requests: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "requests", + Help: "The total number of client requests.", + Namespace: PrometheusNamespace, + ConstLabels: constLabels, + }, + []string{"ingress", "namespace", "status", "service"}, + ), + + bytesSent: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "bytes_sent", + Help: "The number of bytes sent to a client", + Namespace: PrometheusNamespace, + Buckets: prometheus.ExponentialBuckets(10, 10, 7), // 7 buckets, exponential factor of 10. + ConstLabels: constLabels, + }, + requestTags, + ), + + upstreamLatency: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "ingress_upstream_latency_seconds", + Help: "Upstream service latency per Ingress", + Namespace: PrometheusNamespace, + ConstLabels: constLabels, + }, + []string{"ingress", "namespace", "service"}, + ), + } + + sc.metricMapping = map[string]interface{}{ + prometheus.BuildFQName(PrometheusNamespace, "", "request_duration_seconds"): sc.requestTime, + prometheus.BuildFQName(PrometheusNamespace, "", "request_size"): sc.requestLength, + + prometheus.BuildFQName(PrometheusNamespace, "", "response_duration_seconds"): sc.responseTime, + prometheus.BuildFQName(PrometheusNamespace, "", "response_size"): sc.responseLength, + + prometheus.BuildFQName(PrometheusNamespace, "", "bytes_sent"): sc.bytesSent, + + prometheus.BuildFQName(PrometheusNamespace, "", "ingress_upstream_latency_seconds"): sc.upstreamLatency, + } + + return sc, nil +} + +func (sc *SocketCollector) handleMessage(msg []byte) { + klog.V(5).Infof("msg: %v", string(msg)) + + // Unmarshal bytes + var statsBatch []socketData + err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(msg, &statsBatch) + if err != nil { + klog.Errorf("Unexpected error deserializing JSON payload: %v. 
Payload:\n%v", err, string(msg)) + return + } + + for _, stats := range statsBatch { + if !sc.hosts.Has(stats.Host) { + klog.V(3).Infof("skiping metric for host %v that is not being served", stats.Host) + continue + } + + // Note these must match the order in requestTags at the top + requestLabels := prometheus.Labels{ + "status": stats.Status, + "method": stats.Method, + "path": stats.Path, + "namespace": stats.Namespace, + "ingress": stats.Ingress, + "service": stats.Service, + } + if sc.metricsPerHost { + requestLabels["host"] = stats.Host + } + + collectorLabels := prometheus.Labels{ + "namespace": stats.Namespace, + "ingress": stats.Ingress, + "status": stats.Status, + "service": stats.Service, + } + + latencyLabels := prometheus.Labels{ + "namespace": stats.Namespace, + "ingress": stats.Ingress, + "service": stats.Service, + } + + requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) + if err != nil { + klog.Errorf("Error fetching requests metric: %v", err) + } else { + requestsMetric.Inc() + } + + if stats.Latency != -1 { + latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels) + if err != nil { + klog.Errorf("Error fetching latency metric: %v", err) + } else { + latencyMetric.Observe(stats.Latency) + } + } + + if stats.RequestTime != -1 { + requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels) + if err != nil { + klog.Errorf("Error fetching request duration metric: %v", err) + } else { + requestTimeMetric.Observe(stats.RequestTime) + } + } + + if stats.RequestLength != -1 { + requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels) + if err != nil { + klog.Errorf("Error fetching request length metric: %v", err) + } else { + requestLengthMetric.Observe(stats.RequestLength) + } + } + + if stats.ResponseTime != -1 { + responseTimeMetric, err := sc.responseTime.GetMetricWith(requestLabels) + if err != nil { + klog.Errorf("Error fetching upstream response time metric: %v", err) + } else { + responseTimeMetric.Observe(stats.ResponseTime) + } + } + + if stats.ResponseLength != -1 { + bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) + if err != nil { + klog.Errorf("Error fetching bytes sent metric: %v", err) + } else { + bytesSentMetric.Observe(stats.ResponseLength) + } + + responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels) + if err != nil { + klog.Errorf("Error fetching bytes sent metric: %v", err) + } else { + responseSizeMetric.Observe(stats.ResponseLength) + } + } + } +} + +// Start listen for connections in the unix socket and spawns a goroutine to process the content +func (sc *SocketCollector) Start() { + for { + conn, err := sc.listener.Accept() + if err != nil { + continue + } + + go handleMessages(conn, sc.handleMessage) + } +} + +// Stop stops unix listener +func (sc *SocketCollector) Stop() { + sc.listener.Close() +} + +// RemoveMetrics deletes prometheus metrics from prometheus for ingresses and +// host that are not available anymore. +// Ref: https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec.Delete +func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus.Gatherer) { + mfs, err := registry.Gather() + if err != nil { + klog.Errorf("Error gathering metrics: %v", err) + return + } + + // 1. 
remove metrics of removed ingresses + klog.V(2).Infof("removing ingresses %v from metrics", ingresses) + for _, mf := range mfs { + metricName := mf.GetName() + metric, ok := sc.metricMapping[metricName] + if !ok { + continue + } + + toRemove := sets.NewString(ingresses...) + for _, m := range mf.GetMetric() { + labels := make(map[string]string, len(m.GetLabel())) + for _, labelPair := range m.GetLabel() { + labels[*labelPair.Name] = *labelPair.Value + } + + // remove labels that are constant + deleteConstants(labels) + + ns, ok := labels["namespace"] + if !ok { + continue + } + ing, ok := labels["ingress"] + if !ok { + continue + } + + ingKey := fmt.Sprintf("%v/%v", ns, ing) + if !toRemove.Has(ingKey) { + continue + } + + klog.V(2).Infof("Removing prometheus metric from histogram %v for ingress %v", metricName, ingKey) + + h, ok := metric.(*prometheus.HistogramVec) + if ok { + removed := h.Delete(labels) + if !removed { + klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) + } + } + + s, ok := metric.(*prometheus.SummaryVec) + if ok { + removed := s.Delete(labels) + if !removed { + klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) + } + } + } + } + +} + +// Describe implements prometheus.Collector +func (sc SocketCollector) Describe(ch chan<- *prometheus.Desc) { + sc.requestTime.Describe(ch) + sc.requestLength.Describe(ch) + + sc.requests.Describe(ch) + + sc.upstreamLatency.Describe(ch) + + sc.responseTime.Describe(ch) + sc.responseLength.Describe(ch) + + sc.bytesSent.Describe(ch) +} + +// Collect implements the prometheus.Collector interface. +func (sc SocketCollector) Collect(ch chan<- prometheus.Metric) { + sc.requestTime.Collect(ch) + sc.requestLength.Collect(ch) + + sc.requests.Collect(ch) + + sc.upstreamLatency.Collect(ch) + + sc.responseTime.Collect(ch) + sc.responseLength.Collect(ch) + + sc.bytesSent.Collect(ch) +} + +// SetHosts sets the hostnames that are being served by the ingress controller +// This set of hostnames is used to filter the metrics to be exposed +func (sc *SocketCollector) SetHosts(hosts sets.String) { + sc.hosts = hosts +} + +// handleMessages process the content received in a network connection +func handleMessages(conn io.ReadCloser, fn func([]byte)) { + defer conn.Close() + data, err := ioutil.ReadAll(conn) + if err != nil { + return + } + + fn(data) +} + +func deleteConstants(labels prometheus.Labels) { + delete(labels, "controller_namespace") + delete(labels, "controller_class") + delete(labels, "controller_pod") +} diff --git a/internal/ingress/metric/collectors/socket_test.go b/internal/ingress/metric/collectors/socket_test.go new file mode 100644 index 0000000000..b9b3c68482 --- /dev/null +++ b/internal/ingress/metric/collectors/socket_test.go @@ -0,0 +1,324 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "fmt" + "net" + "sync/atomic" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/util/sets" +) + +func TestNewUDPLogListener(t *testing.T) { + var count uint64 + + fn := func(message []byte) { + atomic.AddUint64(&count, 1) + } + + tmpFile := fmt.Sprintf("/tmp/test-socket-%v", time.Now().Nanosecond()) + + l, err := net.Listen("unix", tmpFile) + if err != nil { + t.Fatalf("unexpected error creating unix socket: %v", err) + } + if l == nil { + t.Fatalf("expected a listener but none returned") + } + + defer l.Close() + + go func() { + for { + conn, err := l.Accept() + if err != nil { + continue + } + + go handleMessages(conn, fn) + } + }() + + conn, _ := net.Dial("unix", tmpFile) + conn.Write([]byte("message")) + conn.Close() + + time.Sleep(1 * time.Millisecond) + if atomic.LoadUint64(&count) != 1 { + t.Errorf("expected only one message from the socket listener but %v returned", atomic.LoadUint64(&count)) + } +} + +func TestCollector(t *testing.T) { + cases := []struct { + name string + data []string + metrics []string + wantBefore string + removeIngresses []string + wantAfter string + }{ + { + name: "invalid metric object should not increase prometheus metrics", + data: []string{`#missing { + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app" + }`}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + wantBefore: ` + + `, + }, + { + name: "valid metric object should update prometheus metrics", + data: []string{`[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app" + }]`}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + wantBefore: ` + # HELP nginx_ingress_controller_response_duration_seconds The time spent on receiving the response from the upstream server + # TYPE nginx_ingress_controller_response_duration_seconds histogram + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.025"} 0 + 
nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.25"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="2.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="+Inf"} 1 + nginx_ingress_controller_response_duration_seconds_sum{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 200 + nginx_ingress_controller_response_duration_seconds_count{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 1 + `, + removeIngresses: []string{"test-app-production/web-yml"}, + wantAfter: ` + `, + }, + + { + name: "multiple messages should increase prometheus metric by two", + data: []string{`[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + 
"ingress":"web-yml", + "service":"test-app" + }]`, `[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-qa", + "ingress":"web-yml-qa", + "service":"test-app-qa" + }]`, `[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-qa", + "ingress":"web-yml-qa", + "service":"test-app-qa" + }]`}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + wantBefore: ` + # HELP nginx_ingress_controller_response_duration_seconds The time spent on receiving the response from the upstream server + # TYPE nginx_ingress_controller_response_duration_seconds histogram + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.025"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.25"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="1"} 0 + 
nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="2.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="+Inf"} 1 + nginx_ingress_controller_response_duration_seconds_sum{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 200 + nginx_ingress_controller_response_duration_seconds_count{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 1 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.025"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.25"} 0 + 
nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="2.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200",le="+Inf"} 2 + nginx_ingress_controller_response_duration_seconds_sum{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200"} 400 + nginx_ingress_controller_response_duration_seconds_count{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml-qa",method="GET",namespace="test-app-qa",path="/admin",service="test-app-qa",status="200"} 2 + `, + }, + + { + name: "collector should be able to handle batched metrics correctly", + data: []string{`[ + { + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app" + }, + { + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":100, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app" + }]`}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + wantBefore: ` + # HELP nginx_ingress_controller_response_duration_seconds The time spent on receiving the response from the upstream server + # TYPE nginx_ingress_controller_response_duration_seconds histogram + 
nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.025"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.25"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="2.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200",le="+Inf"} 2 + 
nginx_ingress_controller_response_duration_seconds_sum{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 300 + nginx_ingress_controller_response_duration_seconds_count{controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="200"} 2 + `, + removeIngresses: []string{"test-app-production/web-yml"}, + wantAfter: ` + `, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + registry := prometheus.NewPedanticRegistry() + + sc, err := NewSocketCollector("pod", "default", "ingress", true) + if err != nil { + t.Errorf("%v: unexpected error creating new SocketCollector: %v", c.name, err) + } + + if err := registry.Register(sc); err != nil { + t.Errorf("registering collector failed: %s", err) + } + + sc.SetHosts(sets.NewString("testshop.com")) + + for _, d := range c.data { + sc.handleMessage([]byte(d)) + } + + if err := GatherAndCompare(sc, c.wantBefore, c.metrics, registry); err != nil { + t.Errorf("unexpected collecting result:\n%s", err) + } + + if len(c.removeIngresses) > 0 { + sc.RemoveMetrics(c.removeIngresses, registry) + time.Sleep(1 * time.Second) + + if err := GatherAndCompare(sc, c.wantAfter, c.metrics, registry); err != nil { + t.Errorf("unexpected collecting result:\n%s", err) + } + } + + sc.Stop() + + registry.Unregister(sc) + }) + } +} diff --git a/internal/ingress/metric/collectors/testutils.go b/internal/ingress/metric/collectors/testutils.go new file mode 100644 index 0000000000..8c5c27b627 --- /dev/null +++ b/internal/ingress/metric/collectors/testutils.go @@ -0,0 +1,199 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// GatherAndCompare retrieves all metrics exposed by a collector and compares it +// to an expected output in the Prometheus text exposition format. +// metricNames allows only comparing the given metrics. All are compared if it's nil. 
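+//
+// A minimal usage sketch (the collector, metric name, and expectation below
+// are illustrative only, not part of this change):
+//
+//	reg := prometheus.NewPedanticRegistry()
+//	reg.MustRegister(myCollector)
+//	expected := `
+//	# HELP demo_requests_total A hypothetical counter
+//	# TYPE demo_requests_total counter
+//	demo_requests_total 1
+//	`
+//	if err := GatherAndCompare(myCollector, expected, []string{"demo_requests_total"}, reg); err != nil {
+//		t.Errorf("unexpected collecting result:\n%s", err)
+//	}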
+func GatherAndCompare(c prometheus.Collector, expected string, metricNames []string, reg prometheus.Gatherer) error {
+ expected = removeUnusedWhitespace(expected)
+
+ metrics, err := reg.Gather()
+ if err != nil {
+ return fmt.Errorf("gathering metrics failed: %s", err)
+ }
+ if metricNames != nil {
+ metrics = filterMetrics(metrics, metricNames)
+ }
+ var tp expfmt.TextParser
+ expectedMetrics, err := tp.TextToMetricFamilies(bytes.NewReader([]byte(expected)))
+ if err != nil {
+ return fmt.Errorf("parsing expected metrics failed: %s", err)
+ }
+
+ if !reflect.DeepEqual(metrics, normalizeMetricFamilies(expectedMetrics)) {
+ // Encode the gathered output to the readable text format for comparison.
+ var buf1 bytes.Buffer
+ enc := expfmt.NewEncoder(&buf1, expfmt.FmtText)
+ for _, mf := range metrics {
+ if err := enc.Encode(mf); err != nil {
+ return fmt.Errorf("encoding result failed: %s", err)
+ }
+ }
+ // Encode normalized expected metrics again to generate them in the same ordering
+ // the registry does to spot differences more easily.
+ var buf2 bytes.Buffer
+ enc = expfmt.NewEncoder(&buf2, expfmt.FmtText)
+ for _, mf := range normalizeMetricFamilies(expectedMetrics) {
+ if err := enc.Encode(mf); err != nil {
+ return fmt.Errorf("encoding result failed: %s", err)
+ }
+ }
+
+ if buf2.String() == buf1.String() {
+ return nil
+ }
+
+ return fmt.Errorf(`
+metric output does not match expectation; want:
+
+'%s'
+
+got:
+
+'%s'
+
+`, buf2.String(), buf1.String())
+ }
+ return nil
+}
+
+func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
+ var filtered []*dto.MetricFamily
+ for _, m := range metrics {
+ drop := true
+ for _, name := range names {
+ if m.GetName() == name {
+ drop = false
+ break
+ }
+ }
+ if !drop {
+ filtered = append(filtered, m)
+ }
+ }
+ return filtered
+}
+
+func removeUnusedWhitespace(s string) string {
+ var (
+ trimmedLine string
+ trimmedLines []string
+ lines = strings.Split(s, "\n")
+ )
+
+ for _, l := range lines {
+ trimmedLine = strings.TrimSpace(l)
+
+ if len(trimmedLine) > 0 {
+ trimmedLines = append(trimmedLines, trimmedLine)
+ }
+ }
+
+ // The Prometheus metrics representation parser expects an empty line at the
+ // end, otherwise it fails with an unexpected EOF error.
+ return strings.Join(trimmedLines, "\n") + "\n"
+}
+
+// The below sorting code is copied from the Prometheus client library modulo the added
+// label pair sorting.
+// https://github.com/prometheus/client_golang/blob/ea6e1db4cb8127eeb0b6954f7320363e5451820f/prometheus/registry.go#L642-L684
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ sort.Sort(labelPairSorter(s[i].Label))
+ sort.Sort(labelPairSorter(s[j].Label))
+
+ if len(s[i].Label) != len(s[j].Label) {
+ return len(s[i].Label) < len(s[j].Label)
+ }
+
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
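+//
+// For example (hypothetical input), a map {"b": mfB, "a": mfA, "empty": mfE},
+// where mfE holds no Metric entries, normalizes to the slice [mfA, mfB].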
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
+
+func (s labelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s labelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s labelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
diff --git a/internal/ingress/metric/dummy.go b/internal/ingress/metric/dummy.go
new file mode 100644
index 0000000000..59a9144e0c
--- /dev/null
+++ b/internal/ingress/metric/dummy.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metric
+
+import (
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/ingress-nginx/internal/ingress"
+)
+
+// NewDummyCollector returns a dummy metric collector
+func NewDummyCollector() Collector {
+ return &DummyCollector{}
+}
+
+// DummyCollector is a dummy implementation used for mocks in tests
+type DummyCollector struct{}
+
+// ConfigSuccess ...
+func (dc DummyCollector) ConfigSuccess(uint64, bool) {}
+
+// IncReloadCount ...
+func (dc DummyCollector) IncReloadCount() {}
+
+// IncReloadErrorCount ...
+func (dc DummyCollector) IncReloadErrorCount() {}
+
+// IncCheckCount ...
+func (dc DummyCollector) IncCheckCount(string, string) {}
+
+// IncCheckErrorCount ...
+func (dc DummyCollector) IncCheckErrorCount(string, string) {}
+
+// RemoveMetrics ...
+func (dc DummyCollector) RemoveMetrics(ingresses, endpoints []string) {}
+
+// Start ...
+func (dc DummyCollector) Start() {}
+
+// Stop ...
+func (dc DummyCollector) Stop() {}
+
+// SetSSLExpireTime ...
+func (dc DummyCollector) SetSSLExpireTime([]*ingress.Server) {}
+
+// SetHosts ...
+func (dc DummyCollector) SetHosts(hosts sets.String) {}
+
+// OnStartedLeading indicates the pod was elected as the leader
+func (dc DummyCollector) OnStartedLeading(electionID string) {}
+
+// OnStoppedLeading indicates the pod is not the current leader
+func (dc DummyCollector) OnStoppedLeading(electionID string) {}
diff --git a/internal/ingress/metric/main.go b/internal/ingress/metric/main.go
new file mode 100644
index 0000000000..e30fdc7835
--- /dev/null
+++ b/internal/ingress/metric/main.go
@@ -0,0 +1,172 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metric
+
+import (
+ "os"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/ingress-nginx/internal/ingress"
+ "k8s.io/ingress-nginx/internal/ingress/annotations/class"
+ "k8s.io/ingress-nginx/internal/ingress/metric/collectors"
+)
+
+// Collector defines the interface for a metric collector
+type Collector interface {
+ ConfigSuccess(uint64, bool)
+
+ IncReloadCount()
+ IncReloadErrorCount()
+
+ OnStartedLeading(string)
+ OnStoppedLeading(string)
+
+ IncCheckCount(string, string)
+ IncCheckErrorCount(string, string)
+
+ RemoveMetrics(ingresses, endpoints []string)
+
+ SetSSLExpireTime([]*ingress.Server)
+
+ // SetHosts sets the hostnames that are being served by the ingress controller
+ SetHosts(sets.String)
+
+ Start()
+ Stop()
+}
+
+type collector struct {
+ nginxStatus collectors.NGINXStatusCollector
+ nginxProcess collectors.NGINXProcessCollector
+
+ ingressController *collectors.Controller
+
+ socket *collectors.SocketCollector
+
+ registry *prometheus.Registry
+}
+
+// NewCollector creates a new metric collector for the ingress controller
+func NewCollector(metricsPerHost bool, registry *prometheus.Registry) (Collector, error) {
+ podNamespace := os.Getenv("POD_NAMESPACE")
+ if podNamespace == "" {
+ podNamespace = "default"
+ }
+
+ podName := os.Getenv("POD_NAME")
+
+ nc, err := collectors.NewNGINXStatus(podName, podNamespace, class.IngressClass)
+ if err != nil {
+ return nil, err
+ }
+
+ pc, err := collectors.NewNGINXProcess(podName, podNamespace, class.IngressClass)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := collectors.NewSocketCollector(podName, podNamespace, class.IngressClass, metricsPerHost)
+ if err != nil {
+ return nil, err
+ }
+
+ ic := collectors.NewController(podName, podNamespace, class.IngressClass)
+
+ return Collector(&collector{
+ nginxStatus: nc,
+ nginxProcess: pc,
+
+ ingressController: ic,
+
+ socket: s,
+
+ registry: registry,
+ }), nil
+}
+
+func (c *collector) ConfigSuccess(hash uint64, success bool) {
+ c.ingressController.ConfigSuccess(hash, success)
+}
+
+func (c *collector) IncCheckCount(namespace string, name string) {
+ c.ingressController.IncCheckCount(namespace, name)
+}
+
+func (c *collector) IncCheckErrorCount(namespace string, name string) {
+ c.ingressController.IncCheckErrorCount(namespace, name)
+}
+
+func (c *collector) IncReloadCount() {
+ c.ingressController.IncReloadCount()
+}
+
+func (c *collector) IncReloadErrorCount() {
+ c.ingressController.IncReloadErrorCount()
+}
+
+func (c *collector) RemoveMetrics(ingresses, hosts []string) {
+ c.socket.RemoveMetrics(ingresses, c.registry)
+ c.ingressController.RemoveMetrics(hosts, c.registry)
+}
+
+func (c *collector) Start() {
+ c.registry.MustRegister(c.nginxStatus)
+ c.registry.MustRegister(c.nginxProcess)
+ c.registry.MustRegister(c.ingressController)
+ c.registry.MustRegister(c.socket)
+
+ // the default nginx.conf does not contain
+ // a server section with the status port
+ go func() {
+ time.Sleep(5 * time.Second)
+ c.nginxStatus.Start()
+ }()
+ go c.nginxProcess.Start()
+ go c.socket.Start()
+}
+
+func
(c *collector) Stop() {
+ c.registry.Unregister(c.nginxStatus)
+ c.registry.Unregister(c.nginxProcess)
+ c.registry.Unregister(c.ingressController)
+ c.registry.Unregister(c.socket)
+
+ c.nginxStatus.Stop()
+ c.nginxProcess.Stop()
+ c.socket.Stop()
+}
+
+func (c *collector) SetSSLExpireTime(servers []*ingress.Server) {
+ c.ingressController.SetSSLExpireTime(servers)
+}
+
+func (c *collector) SetHosts(hosts sets.String) {
+ c.socket.SetHosts(hosts)
+}
+
+// OnStartedLeading indicates the pod was elected as the leader
+func (c *collector) OnStartedLeading(electionID string) {
+ c.ingressController.OnStartedLeading(electionID)
+}
+
+// OnStoppedLeading indicates the pod stopped being the leader
+func (c *collector) OnStoppedLeading(electionID string) {
+ c.ingressController.OnStoppedLeading(electionID)
+ c.ingressController.RemoveAllSSLExpireMetrics(c.registry)
+}
diff --git a/internal/ingress/resolver/main.go b/internal/ingress/resolver/main.go
index b731448665..11af30d555 100644
--- a/internal/ingress/resolver/main.go
+++ b/internal/ingress/resolver/main.go
@@ -18,7 +18,6 @@ package resolver
 import (
 apiv1 "k8s.io/api/core/v1"
-
 "k8s.io/ingress-nginx/internal/ingress/defaults"
)
@@ -27,15 +26,18 @@ type Resolver interface {
 // GetDefaultBackend returns the backend that must be used as default
 GetDefaultBackend() defaults.Backend
- // GetSecret searches for secrets contenating the namespace and name using a the character /
+ // GetSecret searches for secrets, concatenating the namespace and name with the character /
 GetSecret(string) (*apiv1.Secret, error)
 // GetAuthCertificate resolves a given secret name into an SSL certificate.
 // The secret must contain 3 keys named:
+ // ca.crt: contains the certificate chain used for authentication
+ // tls.crt: contains the server certificate
+ // tls.key: contains the server key
 GetAuthCertificate(string) (*AuthSSLCert, error)
- // GetService searches for services contenating the namespace and name using a the character /
+ // GetService searches for services, concatenating the namespace and name with the character /
 GetService(string) (*apiv1.Service, error)
}
@@ -52,6 +54,13 @@ type AuthSSLCert struct {
 // Equal tests for equality between two AuthSSLCert types
 func (asslc1 *AuthSSLCert) Equal(assl2 *AuthSSLCert) bool {
+ if asslc1 == assl2 {
+ return true
+ }
+ if asslc1 == nil || assl2 == nil {
+ return false
+ }
+
 if asslc1.Secret != assl2.Secret {
 return false
 }
diff --git a/internal/ingress/sort_ingress.go b/internal/ingress/sort_ingress.go
deleted file mode 100644
index d5b112283e..0000000000
--- a/internal/ingress/sort_ingress.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package ingress - -import ( - "crypto/x509" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// SSLCert describes a SSL certificate to be used in a server -type SSLCert struct { - metav1.ObjectMeta `json:"metadata,omitempty"` - Certificate *x509.Certificate `json:"certificate,omitempty"` - // CAFileName contains the path to the file with the root certificate - CAFileName string `json:"caFileName"` - // PemFileName contains the path to the file with the certificate and key concatenated - PemFileName string `json:"pemFileName"` - // FullChainPemFileName contains the path to the file with the certificate and key concatenated - // This certificate contains the full chain (ca + intermediates + cert) - FullChainPemFileName string `json:"fullChainPemFileName"` - // PemSHA contains the sha1 of the pem file. - // This is used to detect changes in the secret that contains the certificates - PemSHA string `json:"pemSha"` - // CN contains all the common names defined in the SSL certificate - CN []string `json:"cn"` - // ExpiresTime contains the expiration of this SSL certificate in timestamp format - ExpireTime time.Time `json:"expires"` -} - -// GetObjectKind implements the ObjectKind interface as a noop -func (s SSLCert) GetObjectKind() schema.ObjectKind { - return schema.EmptyObjectKind -} diff --git a/internal/ingress/sslcert.go b/internal/ingress/sslcert.go new file mode 100644 index 0000000000..4b585a5838 --- /dev/null +++ b/internal/ingress/sslcert.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "crypto/x509" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SSLCert describes a SSL certificate to be used in a server +type SSLCert struct { + metav1.ObjectMeta `json:"metadata,omitempty"` + Certificate *x509.Certificate `json:"certificate,omitempty"` + // CAFileName contains the path to the file with the root certificate + CAFileName string `json:"caFileName"` + // PemFileName contains the path to the file with the certificate and key concatenated + PemFileName string `json:"pemFileName"` + // FullChainPemFileName contains the path to the file with the certificate and key concatenated + // This certificate contains the full chain (ca + intermediates + cert) + FullChainPemFileName string `json:"fullChainPemFileName"` + // PemSHA contains the sha1 of the pem file. 
+ // This is used to detect changes in the secret that contains the certificates
+ PemSHA string `json:"pemSha"`
+ // CN contains all the common names defined in the SSL certificate
+ CN []string `json:"cn"`
+ // ExpireTime contains the expiration of this SSL certificate in timestamp format
+ ExpireTime time.Time `json:"expires"`
+ // PemCertKey contains the PEM-encoded certificate and key concatenated
+ PemCertKey string `json:"pemCertKey"`
+}
+
+// GetObjectKind implements the ObjectKind interface as a noop
+func (s SSLCert) GetObjectKind() schema.ObjectKind {
+ return schema.EmptyObjectKind
+}
+
+// HashInclude defines whether a field should be used to calculate the hash
+// (PemSHA and ExpireTime are excluded)
+func (s SSLCert) HashInclude(field string, v interface{}) (bool, error) {
+ return (field != "PemSHA" && field != "ExpireTime"), nil
+}
diff --git a/internal/ingress/sort_ingress_test.go b/internal/ingress/sslcert_test.go
similarity index 100%
rename from internal/ingress/sort_ingress_test.go
rename to internal/ingress/sslcert_test.go
diff --git a/internal/ingress/status/status.go b/internal/ingress/status/status.go
index 699b0a3398..04d8790f91 100644
--- a/internal/ingress/status/status.go
+++ b/internal/ingress/status/status.go
@@ -19,29 +19,23 @@ package status
 import (
 "fmt"
 "net"
- "os"
 "sort"
 "strings"
 "time"
- "github.com/golang/glog"
 "github.com/pkg/errors"
+ "k8s.io/klog"
 pool "gopkg.in/go-playground/pool.v3"
 apiv1 "k8s.io/api/core/v1"
- extensions "k8s.io/api/extensions/v1beta1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/tools/leaderelection"
- "k8s.io/client-go/tools/leaderelection/resourcelock"
- "k8s.io/client-go/tools/record"
 "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
- "k8s.io/ingress-nginx/internal/ingress/annotations/class"
- "k8s.io/ingress-nginx/internal/ingress/store"
+ "k8s.io/ingress-nginx/internal/ingress"
+ "k8s.io/ingress-nginx/internal/ingress/controller/store"
 "k8s.io/ingress-nginx/internal/k8s"
 "k8s.io/ingress-nginx/internal/task"
)
@@ -50,104 +44,102 @@ const (
 updateInterval = 60 * time.Second
)
-// Sync ...
-type Sync interface {
- Run(stopCh <-chan struct{})
+// Syncer ...
+type Syncer interface {
+ Run(chan struct{})
+ Shutdown()
}
+type ingressLister interface {
+ // ListIngresses returns the list of Ingresses
+ ListIngresses(store.IngressFilterFunc) []*ingress.Ingress
+}
+
// Config ...
type Config struct {
 Client clientset.Interface
 PublishService string
- ElectionID string
+ PublishStatusAddress string
 UpdateStatusOnShutdown bool
 UseNodeInternalIP bool
- IngressLister store.IngressLister
-
- DefaultIngressClass string
- IngressClass string
+ IngressLister ingressLister
}
// statusSync keeps the status IP in each Ingress rule updated executing a periodic check
// in all the defined rules.
To simplify the process leader election is used so the update
// is executed only in one node (Ingress controllers can be scaled to more than one)
// If the controller is running with the flag --publish-service (with a valid service)
-// the IP address behind the service is used, if not the source is the IP/s of the node/s
+// the IP address behind the service is used; if it is running with the flag
+// --publish-status-address, the address specified in the flag is used; if neither of the
+// two flags is set, the source is the IP/s of the node/s
type statusSync struct {
 Config
+ // pod contains runtime information about this pod
 pod *k8s.PodInfo
- elector *leaderelection.LeaderElector
 // workqueue used to keep in sync the status IP/s
 // in the Ingress rules
 syncQueue *task.Queue
}
-// Run starts the loop to keep the status in sync
-func (s statusSync) Run(stopCh <-chan struct{}) {
- go s.elector.Run()
- go wait.Forever(s.update, updateInterval)
+// Run starts the loop to keep the status in sync
+func (s statusSync) Run(stopCh chan struct{}) {
 go s.syncQueue.Run(time.Second, stopCh)
- <-stopCh
-}
-func (s *statusSync) update() {
- // send a dummy object to the queue to force a sync
- s.syncQueue.Enqueue("sync status")
+ // trigger initial sync
+ s.syncQueue.EnqueueTask(task.GetDummyObject("sync status"))
+
+ // when this instance is the leader we need to enqueue
+ // an item to trigger the update of the Ingress status.
+ wait.PollUntil(updateInterval, func() (bool, error) {
+ s.syncQueue.EnqueueTask(task.GetDummyObject("sync status"))
+ return false, nil
+ }, stopCh)
}
-// Shutdown stop the sync. In case the instance is the leader it will remove the current IP
-// if there is no other instances running.
+// Shutdown stops the sync. In case the instance is the leader it will remove the current IP
+// if there are no other instances running.
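+//
+// Typical wiring (illustrative only, not part of this change): run the syncer
+// for the lifetime of the controller and remove the published address on the
+// way out:
+//
+//	stopCh := make(chan struct{})
+//	go syncer.Run(stopCh)
+//	...
+//	close(stopCh)
+//	syncer.Shutdown()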
func (s statusSync) Shutdown() {
 go s.syncQueue.Shutdown()
-
- // remove IP from Ingress
- if !s.elector.IsLeader() {
- return
- }
 if !s.UpdateStatusOnShutdown {
- glog.Warningf("skipping update of status of Ingress rules")
+ klog.Warningf("skipping update of status of Ingress rules")
 return
 }
- glog.Infof("updating status of Ingress rules (remove)")
+ klog.Info("updating status of Ingress rules (remove)")
 addrs, err := s.runningAddresses()
 if err != nil {
- glog.Errorf("error obtaining running IPs: %v", addrs)
+ klog.Errorf("error obtaining running IPs: %v", err)
 return
 }
 if len(addrs) > 1 {
 // leave the job to the next leader
- glog.Infof("leaving status update for next leader (%v)", len(addrs))
+ klog.Infof("leaving status update for next leader (%v)", len(addrs))
 return
 }
 if s.isRunningMultiplePods() {
- glog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)")
+ klog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)")
 return
 }
- glog.Infof("removing address from ingress status (%v)", addrs)
+ klog.Infof("removing address from ingress status (%v)", addrs)
 s.updateStatus([]apiv1.LoadBalancerIngress{})
}
func (s *statusSync) sync(key interface{}) error {
 if s.syncQueue.IsShuttingDown() {
- glog.V(2).Infof("skipping Ingress status update (shutting down in progress)")
- return nil
- }
-
- if !s.elector.IsLeader() {
- glog.V(2).Infof("skipping Ingress status update (I am not the current leader)")
+ klog.V(2).Infof("skipping Ingress status update (shutting down in progress)")
 return nil
 }
@@ -164,70 +156,15 @@ func (s statusSync) keyfunc(input interface{}) (interface{}, error) {
 return input, nil
}
-// NewStatusSyncer returns a new Sync instance
-func NewStatusSyncer(config Config) Sync {
- pod, err := k8s.GetPodDetails(config.Client)
- if err != nil {
- glog.Fatalf("unexpected error obtaining pod information: %v", err)
- }
-
+// NewStatusSyncer returns a new Syncer instance
+func NewStatusSyncer(podInfo *k8s.PodInfo, config Config) Syncer {
 st := statusSync{
- pod: pod,
+ pod: podInfo,
 Config: config,
 }
 st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc)
- // we need to use the defined ingress class to allow multiple leaders
- // in order to update information about ingress status
- electionID := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass)
- if config.IngressClass != "" {
- electionID = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass)
- }
-
- callbacks := leaderelection.LeaderCallbacks{
- OnStartedLeading: func(stop <-chan struct{}) {
- glog.V(2).Infof("I am the new status update leader")
- },
- OnStoppedLeading: func() {
- glog.V(2).Infof("I am not status update leader anymore")
- },
- OnNewLeader: func(identity string) {
- glog.Infof("new leader elected: %v", identity)
- },
- }
-
- broadcaster := record.NewBroadcaster()
- hostname, _ := os.Hostname()
-
- recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
- Component: "ingress-leader-elector",
- Host: hostname,
- })
-
- lock := resourcelock.ConfigMapLock{
- ConfigMapMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: electionID},
- Client: config.Client.CoreV1(),
- LockConfig: resourcelock.ResourceLockConfig{
- Identity: pod.Name,
- EventRecorder: recorder,
- },
- }
-
- ttl := 30 * time.Second
- le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
- Lock: &lock,
- LeaseDuration: ttl,
- RenewDeadline: ttl / 2,
- RetryPeriod: ttl / 4,
-
Callbacks: callbacks, - }) - - if err != nil { - glog.Fatalf("unexpected error starting leader election: %v", err) - } - - st.elector = le return st } @@ -243,6 +180,11 @@ func (s *statusSync) runningAddresses() ([]string, error) { return nil, err } + if svc.Spec.Type == apiv1.ServiceTypeExternalName { + addrs = append(addrs, svc.Spec.ExternalName) + return addrs, nil + } + for _, ip := range svc.Status.LoadBalancer.Ingress { if ip.IP == "" { addrs = append(addrs, ip.Hostname) @@ -255,6 +197,11 @@ func (s *statusSync) runningAddresses() ([]string, error) { return addrs, nil } + if s.PublishStatusAddress != "" { + addrs = append(addrs, s.PublishStatusAddress) + return addrs, nil + } + // get information about all the pods running the ingress controller pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(), @@ -264,6 +211,11 @@ func (s *statusSync) runningAddresses() ([]string, error) { } for _, pod := range pods.Items { + // only Running pods are valid + if pod.Status.Phase != apiv1.PodRunning { + continue + } + name := k8s.GetNodeIPOrName(s.Client, pod.Spec.NodeName, s.UseNodeInternalIP) if !sliceutils.StringInSlice(name, addrs) { addrs = append(addrs, name) @@ -304,17 +256,19 @@ func sliceToStatus(endpoints []string) []apiv1.LoadBalancerIngress { // updateStatus changes the status information of Ingress rules func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) { - ings := s.IngressLister.List() + ings := s.IngressLister.ListIngresses(nil) p := pool.NewLimited(10) defer p.Close() batch := p.Batch() + sort.SliceStable(newIngressPoint, lessLoadBalancerIngress(newIngressPoint)) - for _, cur := range ings { - ing := cur.(*extensions.Ingress) - - if !class.IsValid(ing) { + for _, ing := range ings { + curIPs := ing.Status.LoadBalancer.Ingress + sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs)) + if ingressSliceEqual(curIPs, newIngressPoint) { + klog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name) continue } @@ -325,35 +279,39 @@ func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) { batch.WaitAll() } -func runUpdate(ing *extensions.Ingress, status []apiv1.LoadBalancerIngress, +func runUpdate(ing *ingress.Ingress, status []apiv1.LoadBalancerIngress, client clientset.Interface) pool.WorkFunc { return func(wu pool.WorkUnit) (interface{}, error) { if wu.IsCancelled() { return nil, nil } - sort.SliceStable(status, lessLoadBalancerIngress(status)) - - curIPs := ing.Status.LoadBalancer.Ingress - sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs)) - - if ingressSliceEqual(status, curIPs) { - glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name) - return true, nil - } - - ingClient := client.ExtensionsV1beta1().Ingresses(ing.Namespace) + if k8s.IsNetworkingIngressAvailable { + ingClient := client.NetworkingV1beta1().Ingresses(ing.Namespace) + currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)) + } - currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)) - } + klog.Infof("updating Ingress %v/%v status from %v to %v", currIng.Namespace, currIng.Name, currIng.Status.LoadBalancer.Ingress, status) + 
currIng.Status.LoadBalancer.Ingress = status + _, err = ingClient.UpdateStatus(currIng) + if err != nil { + klog.Warningf("error updating ingress rule: %v", err) + } + } else { + ingClient := client.ExtensionsV1beta1().Ingresses(ing.Namespace) + currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)) + } - glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, status) - currIng.Status.LoadBalancer.Ingress = status - _, err = ingClient.UpdateStatus(currIng) - if err != nil { - glog.Warningf("error updating ingress rule: %v", err) + klog.Infof("updating Ingress %v/%v status from %v to %v", currIng.Namespace, currIng.Name, currIng.Status.LoadBalancer.Ingress, status) + currIng.Status.LoadBalancer.Ingress = status + _, err = ingClient.UpdateStatus(currIng) + if err != nil { + klog.Warningf("error updating ingress rule: %v", err) + } } return true, nil @@ -385,5 +343,6 @@ func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool { return false } } + return true } diff --git a/internal/ingress/status/status_test.go b/internal/ingress/status/status_test.go index 77bfb6969f..dc5b12a949 100644 --- a/internal/ingress/status/status_test.go +++ b/internal/ingress/status/status_test.go @@ -22,14 +22,13 @@ import ( "time" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" testclient "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + "k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress/annotations/class" - "k8s.io/ingress-nginx/internal/ingress/store" + "k8s.io/ingress-nginx/internal/ingress/controller/store" "k8s.io/ingress-nginx/internal/k8s" "k8s.io/ingress-nginx/internal/task" ) @@ -69,6 +68,21 @@ func buildSimpleClientSet() *testclient.Clientset { Spec: apiv1.PodSpec{ NodeName: "foo_node_2", }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "foo1-unknown", + Namespace: apiv1.NamespaceDefault, + }, + Spec: apiv1.PodSpec{ + NodeName: "foo_node_1", + }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodUnknown, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -82,7 +96,7 @@ func buildSimpleClientSet() *testclient.Clientset { { ObjectMeta: metav1.ObjectMeta{ Name: "foo3", - Namespace: api.NamespaceSystem, + Namespace: metav1.NamespaceSystem, Labels: map[string]string{ "lable_sig": "foo_pod", }, @@ -90,6 +104,9 @@ func buildSimpleClientSet() *testclient.Clientset { Spec: apiv1.PodSpec{ NodeName: "foo_node_2", }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodRunning, + }, }, }}, &apiv1.ServiceList{Items: []apiv1.Service{ @@ -151,10 +168,9 @@ func buildSimpleClientSet() *testclient.Clientset { ObjectMeta: metav1.ObjectMeta{ Name: "ingress-controller-leader", Namespace: apiv1.NamespaceDefault, - SelfLink: "/api/v1/namespaces/default/endpoints/ingress-controller-leader", }, }}}, - &extensions.IngressList{Items: buildExtensionsIngresses()}, + &networking.IngressList{Items: buildExtensionsIngresses()}, ) } @@ -162,14 +178,14 @@ func fakeSynFn(interface{}) error { return nil } -func buildExtensionsIngresses() []extensions.Ingress { - return []extensions.Ingress{ +func buildExtensionsIngresses() []networking.Ingress { + return []networking.Ingress{ { ObjectMeta: metav1.ObjectMeta{ Name: "foo_ingress_1", 
Namespace: apiv1.NamespaceDefault, }, - Status: extensions.IngressStatus{ + Status: networking.IngressStatus{ LoadBalancer: apiv1.LoadBalancerStatus{ Ingress: []apiv1.LoadBalancerIngress{ { @@ -183,12 +199,12 @@ func buildExtensionsIngresses() []extensions.Ingress { { ObjectMeta: metav1.ObjectMeta{ Name: "foo_ingress_different_class", - Namespace: api.NamespaceDefault, + Namespace: metav1.NamespaceDefault, Annotations: map[string]string{ class.IngressKey: "no-nginx", }, }, - Status: extensions.IngressStatus{ + Status: networking.IngressStatus{ LoadBalancer: apiv1.LoadBalancerStatus{ Ingress: []apiv1.LoadBalancerIngress{ { @@ -204,7 +220,7 @@ func buildExtensionsIngresses() []extensions.Ingress { Name: "foo_ingress_2", Namespace: apiv1.NamespaceDefault, }, - Status: extensions.IngressStatus{ + Status: networking.IngressStatus{ LoadBalancer: apiv1.LoadBalancerStatus{ Ingress: []apiv1.LoadBalancerIngress{}, }, @@ -213,26 +229,36 @@ func buildExtensionsIngresses() []extensions.Ingress { } } -func buildIngressListener() store.IngressLister { - s := cache.NewStore(cache.MetaNamespaceKeyFunc) - s.Add(&extensions.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo_ingress_non_01", - Namespace: apiv1.NamespaceDefault, - }}) - s.Add(&extensions.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo_ingress_1", - Namespace: apiv1.NamespaceDefault, - }, - Status: extensions.IngressStatus{ - LoadBalancer: apiv1.LoadBalancerStatus{ - Ingress: buildLoadBalancerIngressByIP(), +type testIngressLister struct { +} + +func (til *testIngressLister) ListIngresses(store.IngressFilterFunc) []*ingress.Ingress { + var ingresses []*ingress.Ingress + ingresses = append(ingresses, &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo_ingress_non_01", + Namespace: apiv1.NamespaceDefault, + }}}) + + ingresses = append(ingresses, &ingress.Ingress{ + Ingress: networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo_ingress_1", + Namespace: apiv1.NamespaceDefault, }, - }, - }) + Status: networking.IngressStatus{ + LoadBalancer: apiv1.LoadBalancerStatus{ + Ingress: buildLoadBalancerIngressByIP(), + }, + }, + }}) + + return ingresses +} - return store.IngressLister{Store: s} +func buildIngressLister() ingressLister { + return &testIngressLister{} } func buildStatusSync() statusSync { @@ -248,7 +274,7 @@ func buildStatusSync() statusSync { Config: Config{ Client: buildSimpleClientSet(), PublishService: apiv1.NamespaceDefault + "/" + "foo", - IngressLister: buildIngressListener(), + IngressLister: buildIngressLister(), }, } } @@ -260,22 +286,32 @@ func TestStatusActions(t *testing.T) { c := Config{ Client: buildSimpleClientSet(), PublishService: "", - IngressLister: buildIngressListener(), - DefaultIngressClass: "nginx", - IngressClass: "", + IngressLister: buildIngressLister(), UpdateStatusOnShutdown: true, } + // create object - fkSync := NewStatusSyncer(c) + fkSync := NewStatusSyncer(&k8s.PodInfo{ + Name: "foo_base_pod", + Namespace: apiv1.NamespaceDefault, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, + }, c) if fkSync == nil { t.Fatalf("expected a valid Sync") } + // assume k8s >= 1.14 as the rest of the test + k8s.IsNetworkingIngressAvailable = true + fk := fkSync.(statusSync) - ns := make(chan struct{}) // start it and wait for the election and syn actions - go fk.Run(ns) + stopCh := make(chan struct{}) + defer close(stopCh) + + go fk.Run(stopCh) // wait for the election time.Sleep(100 * time.Millisecond) // execute sync @@ -285,7 +321,7 @@ func 
TestStatusActions(t *testing.T) { newIPs := []apiv1.LoadBalancerIngress{{ IP: "11.0.0.2", }} - fooIngress1, err1 := fk.Client.ExtensionsV1beta1().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{}) + fooIngress1, err1 := fk.Client.NetworkingV1beta1().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{}) if err1 != nil { t.Fatalf("unexpected error") } @@ -294,11 +330,13 @@ func TestStatusActions(t *testing.T) { t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs) } + time.Sleep(1 * time.Second) + // execute shutdown fk.Shutdown() // ingress should be empty newIPs2 := []apiv1.LoadBalancerIngress{} - fooIngress2, err2 := fk.Client.ExtensionsV1beta1().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{}) + fooIngress2, err2 := fk.Client.NetworkingV1beta1().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{}) if err2 != nil { t.Fatalf("unexpected error") } @@ -307,16 +345,13 @@ func TestStatusActions(t *testing.T) { t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, newIPs2) } - oic, err := fk.Client.ExtensionsV1beta1().Ingresses(api.NamespaceDefault).Get("foo_ingress_different_class", metav1.GetOptions{}) + oic, err := fk.Client.NetworkingV1beta1().Ingresses(metav1.NamespaceDefault).Get("foo_ingress_different_class", metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected error") } if oic.Status.LoadBalancer.Ingress[0].IP != "0.0.0.0" && oic.Status.LoadBalancer.Ingress[0].Hostname != "foo.bar.com" { t.Fatalf("invalid ingress status for rule with different class") } - - // end test - ns <- struct{}{} } func TestCallback(t *testing.T) { @@ -325,6 +360,7 @@ func TestCallback(t *testing.T) { func TestKeyfunc(t *testing.T) { fk := buildStatusSync() + i := "foo_base_pod" r, err := fk.keyfunc(i) @@ -367,6 +403,25 @@ func TestRunningAddresessWithPods(t *testing.T) { } } +func TestRunningAddresessWithPublishStatusAddress(t *testing.T) { + fk := buildStatusSync() + fk.PublishService = "" + fk.PublishStatusAddress = "127.0.0.1" + + r, _ := fk.runningAddresses() + if r == nil { + t.Fatalf("returned nil but expected valid []string") + } + rl := len(r) + if len(r) != 1 { + t.Errorf("returned %v but expected %v", rl, 1) + } + rv := r[0] + if rv != "127.0.0.1" { + t.Errorf("returned %v but expected %v", rv, "127.0.0.1") + } +} + /* TODO: this test requires a refactoring func TestUpdateStatus(t *testing.T) { diff --git a/internal/ingress/store/main.go b/internal/ingress/store/main.go deleted file mode 100644 index 299f54c0bb..0000000000 --- a/internal/ingress/store/main.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package store - -import ( - "fmt" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" -) - -// IngressLister makes a Store that lists Ingress. -type IngressLister struct { - cache.Store -} - -// IngressAnnotationsLister makes a Store that lists annotations in Ingress rules. 
-type IngressAnnotationsLister struct { - cache.Store -} - -// SecretLister makes a Store that lists Secrets. -type SecretLister struct { - cache.Store -} - -// GetByName searches for a secret in the local secrets Store -func (sl *SecretLister) GetByName(name string) (*apiv1.Secret, error) { - s, exists, err := sl.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, fmt.Errorf("secret %v was not found", name) - } - return s.(*apiv1.Secret), nil -} - -// ConfigMapLister makes a Store that lists Configmaps. -type ConfigMapLister struct { - cache.Store -} - -// GetByName searches for a configmap in the local configmaps Store -func (cml *ConfigMapLister) GetByName(name string) (*apiv1.ConfigMap, error) { - s, exists, err := cml.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, fmt.Errorf("configmap %v was not found", name) - } - return s.(*apiv1.ConfigMap), nil -} - -// ServiceLister makes a Store that lists Services. -type ServiceLister struct { - cache.Store -} - -// GetByName searches for a service in the local secrets Store -func (sl *ServiceLister) GetByName(name string) (*apiv1.Service, error) { - s, exists, err := sl.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, fmt.Errorf("service %v was not found", name) - } - return s.(*apiv1.Service), nil -} - -// EndpointLister makes a Store that lists Endpoints. -type EndpointLister struct { - cache.Store -} - -// GetServiceEndpoints returns the endpoints of a service, matched on service name. -func (s *EndpointLister) GetServiceEndpoints(svc *apiv1.Service) (*apiv1.Endpoints, error) { - for _, m := range s.Store.List() { - ep := m.(*apiv1.Endpoints) - if svc.Name == ep.Name && svc.Namespace == ep.Namespace { - return ep, nil - } - } - return nil, fmt.Errorf("could not find endpoints for service: %v", svc.Name) -} - -// SSLCertTracker holds a store of referenced Secrets in Ingress rules -type SSLCertTracker struct { - cache.ThreadSafeStore -} - -// NewSSLCertTracker creates a new SSLCertTracker store -func NewSSLCertTracker() *SSLCertTracker { - return &SSLCertTracker{ - cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}), - } -} diff --git a/internal/ingress/type_equals_test.go b/internal/ingress/type_equals_test.go index ca561a462f..8a078b6aef 100644 --- a/internal/ingress/type_equals_test.go +++ b/internal/ingress/type_equals_test.go @@ -71,3 +71,56 @@ func readJSON(p string) (*Configuration, error) { return &c, nil } + +func TestL4ServiceElementsMatch(t *testing.T) { + testCases := []struct { + listA []L4Service + listB []L4Service + expected bool + }{ + {nil, nil, true}, + {[]L4Service{{Port: 80}}, nil, false}, + {[]L4Service{{Port: 80}}, []L4Service{{Port: 80}}, true}, + { + []L4Service{ + {Port: 80, Endpoints: []Endpoint{{Address: "1.1.1.1"}}}}, + []L4Service{{Port: 80}}, + false, + }, + { + []L4Service{ + {Port: 80, Endpoints: []Endpoint{{Address: "1.1.1.1"}, {Address: "1.1.1.2"}}}}, + []L4Service{ + {Port: 80, Endpoints: []Endpoint{{Address: "1.1.1.2"}, {Address: "1.1.1.1"}}}}, + true, + }, + } + + for _, testCase := range testCases { + result := compareL4Service(testCase.listA, testCase.listB) + if result != testCase.expected { + t.Errorf("expected %v but returned %v (%v - %v)", testCase.expected, result, testCase.listA, testCase.listB) + } + } +} + +func TestIntElementsMatch(t *testing.T) { + testCases := []struct { + listA []int + listB []int + expected bool + }{ + {nil, nil, true}, + {[]int{}, nil, false}, + {[]int{}, 
[]int{1}, false}, + {[]int{1}, []int{1}, true}, + {[]int{1, 2, 3}, []int{3, 2, 1}, true}, + } + + for _, testCase := range testCases { + result := compareInts(testCase.listA, testCase.listB) + if result != testCase.expected { + t.Errorf("expected %v but returned %v (%v - %v)", testCase.expected, result, testCase.listA, testCase.listB) + } + } +} diff --git a/internal/ingress/types.go b/internal/ingress/types.go index d9da68b2fa..accc89c599 100644 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -17,23 +17,26 @@ limitations under the License. package ingress import ( - "time" - apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + networking "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations" "k8s.io/ingress-nginx/internal/ingress/annotations/auth" "k8s.io/ingress-nginx/internal/ingress/annotations/authreq" "k8s.io/ingress-nginx/internal/ingress/annotations/authtls" + "k8s.io/ingress-nginx/internal/ingress/annotations/connection" "k8s.io/ingress-nginx/internal/ingress/annotations/cors" + "k8s.io/ingress-nginx/internal/ingress/annotations/influxdb" "k8s.io/ingress-nginx/internal/ingress/annotations/ipwhitelist" + "k8s.io/ingress-nginx/internal/ingress/annotations/log" + "k8s.io/ingress-nginx/internal/ingress/annotations/luarestywaf" + "k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity" "k8s.io/ingress-nginx/internal/ingress/annotations/proxy" "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/redirect" "k8s.io/ingress-nginx/internal/ingress/annotations/rewrite" "k8s.io/ingress-nginx/internal/ingress/resolver" - "k8s.io/ingress-nginx/internal/ingress/store" ) var ( @@ -44,35 +47,33 @@ var ( DefaultSSLDirectory = "/ingress-controller/ssl" ) -// StoreLister returns the configured stores for ingresses, services, -// endpoints, secrets and configmaps. -type StoreLister struct { - Ingress store.IngressLister - Service store.ServiceLister - Endpoint store.EndpointLister - Secret store.SecretLister - ConfigMap store.ConfigMapLister - IngressAnnotation store.IngressAnnotationsLister -} - // Configuration holds the definition of all the parts required to describe all // ingresses reachable by the ingress controller (using a filter by namespace) type Configuration struct { // Backends are a list of backends used by all the Ingress rules in the // ingress controller. This list includes the default backend - Backends []*Backend `json:"backends,omitEmpty"` - // Servers - Servers []*Server `json:"servers,omitEmpty"` + Backends []*Backend `json:"backends,omitempty"` + // Servers save the website config + Servers []*Server `json:"servers,omitempty"` // TCPEndpoints contain endpoints for tcp streams handled by this backend // +optional TCPEndpoints []L4Service `json:"tcpEndpoints,omitempty"` // UDPEndpoints contain endpoints for udp streams handled by this backend // +optional UDPEndpoints []L4Service `json:"udpEndpoints,omitempty"` - // PassthroughBackend contains the backends used for SSL passthrough. + // PassthroughBackends contains the backends used for SSL passthrough. // It contains information about the associated Server Name Indication (SNI). 
// +optional PassthroughBackends []*SSLPassthroughBackend `json:"passthroughBackends,omitempty"` + + // BackendConfigChecksum contains the checksum of the backend configuration + BackendConfigChecksum string `json:"BackendConfigChecksum,omitempty"` + + // ConfigurationChecksum contains the checksum of the Configuration object + ConfigurationChecksum string `json:"configurationChecksum,omitempty"` + + // ControllerPodsCount contains the number of running ingress controller Pods + ControllerPodsCount int `json:"controllerPodsCount,omitempty"` } // Backend describes one or more remote server/s (endpoints) associated with a service @@ -82,11 +83,6 @@ type Backend struct { Name string `json:"name"` Service *apiv1.Service `json:"service,omitempty"` Port intstr.IntOrString `json:"port"` - // This indicates if the communication protocol between the backend and the endpoint is HTTP or HTTPS - // Allowing the use of HTTPS - // The endpoint/s must provide a TLS connection. - // The certificate used in the endpoint cannot be a self signed certificate - Secure bool `json:"secure"` // SecureCACert has the filename and SHA1 of the certificate authorities used to validate // a secured connection to the backend SecureCACert resolver.AuthSSLCert `json:"secureCACert"` @@ -94,10 +90,43 @@ type Backend struct { SSLPassthrough bool `json:"sslPassthrough"` // Endpoints contains the list of endpoints currently running Endpoints []Endpoint `json:"endpoints,omitempty"` - // StickySessionAffinitySession contains the StickyConfig object with stickness configuration + // StickySessionAffinitySession contains the StickyConfig object with stickiness configuration SessionAffinity SessionAffinityConfig `json:"sessionAffinityConfig"` // Consistent hashing by NGINX variable - UpstreamHashBy string `json:"upstream-hash-by,omitempty"` + UpstreamHashBy UpstreamHashByConfig `json:"upstreamHashByConfig,omitempty"` + // LB algorithm configuration per ingress + LoadBalancing string `json:"load-balance,omitempty"` + // Denotes if a backend has no server. The backend instead shares a server with another backend and acts as an + // alternative backend. + // This can be used to share multiple upstreams in the same nginx server block. + NoServer bool `json:"noServer"` + // Policies to describe the characteristics of an alternative backend. + // +optional + TrafficShapingPolicy TrafficShapingPolicy `json:"trafficShapingPolicy,omitempty"` + // Contains a list of backends without servers that are associated with this backend. + // +optional + AlternativeBackends []string `json:"alternativeBackends,omitempty"` +} + +// TrafficShapingPolicy describes the policies to put in place when a backend has no server and is used as an +// alternative backend +// +k8s:deepcopy-gen=true +type TrafficShapingPolicy struct { + // Weight (0-100) of traffic to redirect to the backend. + // e.g. Weight 20 means 20% of traffic will be redirected to the backend and 80% will remain + // with the other backend.
0 weight will not send any traffic to this backend + Weight int `json:"weight"` + // Header on which to redirect requests to this backend + Header string `json:"header"` + // HeaderValue on which to redirect requests to this backend + HeaderValue string `json:"headerValue"` + // Cookie on which to redirect requests to this backend + Cookie string `json:"cookie"` +} + +// HashInclude defines whether a field should be used to calculate the hash +func (s Backend) HashInclude(field string, v interface{}) (bool, error) { + return (field != "Endpoints"), nil } // SessionAffinityConfig describes different affinity configurations for new sessions. @@ -115,9 +144,19 @@ type SessionAffinityConfig struct { // CookieSessionAffinity defines the structure used in Affinity configured by Cookies. // +k8s:deepcopy-gen=true type CookieSessionAffinity struct { - Name string `json:"name"` - Hash string `json:"hash"` - Locations map[string][]string `json:"locations,omitempty"` + Name string `json:"name"` + Expires string `json:"expires,omitempty"` + MaxAge string `json:"maxage,omitempty"` + Locations map[string][]string `json:"locations,omitempty"` + Path string `json:"path,omitempty"` + ChangeOnFailure bool `json:"change_on_failure,omitempty"` +} + +// UpstreamHashByConfig describes the settings from the upstream-hash-by* annotations. +type UpstreamHashByConfig struct { + UpstreamHashBy string `json:"upstream-hash-by,omitempty"` + UpstreamHashBySubset bool `json:"upstream-hash-by-subset,omitempty"` + UpstreamHashBySubsetSize int `json:"upstream-hash-by-subset-size,omitempty"` } // Endpoint describes a kubernetes endpoint in a backend @@ -127,16 +166,8 @@ type Endpoint struct { Address string `json:"address"` // Port number of the TCP port Port string `json:"port"` - // MaxFails returns the number of unsuccessful attempts to communicate - // allowed before this should be considered dow. - // Setting 0 indicates that the check is performed by a Kubernetes probe - MaxFails int `json:"maxFails"` - // FailTimeout returns the time in seconds during which the specified number - // of unsuccessful attempts to communicate with the server should happen - // to consider the endpoint unavailable - FailTimeout int `json:"failTimeout"` // Target returns a reference to the object providing the endpoint - Target *apiv1.ObjectReference `json:"target,omipempty"` + Target *apiv1.ObjectReference `json:"target,omitempty"` } // Server describes a website @@ -146,18 +177,8 @@ type Server struct { // SSLPassthrough indicates if the TLS termination is realized in // the server or in the remote endpoint SSLPassthrough bool `json:"sslPassthrough"` - // SSLCertificate path to the SSL certificate on disk - SSLCertificate string `json:"sslCertificate"` - // SSLFullChainCertificate path to the SSL certificate on disk - // This certificate contains the full chain (ca + intermediates + cert) - SSLFullChainCertificate string `json:"sslFullChainCertificate"` - // SSLExpireTime has the expire date of this certificate - SSLExpireTime time.Time `json:"sslExpireTime"` - // SSLPemChecksum returns the checksum of the certificate file on disk. - // There is no restriction in the hash generator. This checksim can be - // used to determine if the secret changed without the use of file - // system notifications - SSLPemChecksum string `json:"sslPemChecksum"` + // SSLCert describes the certificate that will be used on the server + SSLCert SSLCert `json:"sslCert"` // Locations list of URIs configured in the server.
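// Editor's sketch, not part of this patch: how the new alternative-backend
// fields fit together for a canary-style traffic split. The backend names
// are hypothetical; the field names come from the Backend and
// TrafficShapingPolicy types above:
//
//	canary := &ingress.Backend{
//		Name:     "default-demo-canary-80",
//		NoServer: true, // shares the main backend's server block
//		TrafficShapingPolicy: ingress.TrafficShapingPolicy{
//			Weight:      20,         // ~20% of requests
//			Header:      "X-Canary", // or force routing per request
//			HeaderValue: "always",
//		},
//	}
//	primary := &ingress.Backend{
//		Name:                "default-demo-80",
//		AlternativeBackends: []string{canary.Name},
//	}
//
// Note that HashInclude above excludes Endpoints from the hash, presumably
// so that endpoint churn alone does not change a backend's checksum.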
Locations []*Location `json:"locations,omitempty"` // Alias returns the alias of the server name @@ -167,10 +188,13 @@ type Server struct { // CertificateAuth indicates that this server requires mutual authentication // +optional CertificateAuth authtls.Config `json:"certificateAuth"` - // ServerSnippet returns the snippet of the server // +optional ServerSnippet string `json:"serverSnippet"` + // SSLCiphers returns the list of ciphers to be enabled + SSLCiphers string `json:"sslCiphers,omitempty"` + // AuthTLSError contains the reason why the access to a server should be denied + AuthTLSError string `json:"authTLSError,omitempty"` } // Location describes a URI inside a server. @@ -198,7 +222,7 @@ type Location struct { // uses the default backend. IsDefBackend bool `json:"isDefBackend"` // Ingress returns the ingress from which this location was generated - Ingress *extensions.Ingress `json:"ingress"` + Ingress *Ingress `json:"ingress"` // Backend describes the name of the backend to use. Backend string `json:"backend"` // Service describes the referenced services from the ingress @@ -215,14 +239,21 @@ type Location struct { BasicDigestAuth auth.Config `json:"basicDigestAuth,omitempty"` // Denied returns an error when this location cannot be allowed // Requesting a denied location should return HTTP code 403. - Denied error `json:"denied,omitempty"` - // CorsConfig returns the Cors Configration for the ingress rule + Denied *string `json:"denied,omitempty"` + // CorsConfig returns the Cors Configuration for the ingress rule // +optional CorsConfig cors.Config `json:"corsConfig,omitempty"` // ExternalAuth indicates that the access to this location requires // authentication using an external provider // +optional ExternalAuth authreq.Config `json:"externalAuth,omitempty"` + // EnableGlobalAuth indicates if the access to this location requires + // authentication using an external provider defined in the controller's config + EnableGlobalAuth bool `json:"enableGlobalAuth"` + // HTTP2PushPreload enables HTTP/2 Push Preload for requests to this + // location + // +optional + HTTP2PushPreload bool `json:"http2PushPreload,omitempty"` // RateLimit describes a limit in the number of connections per IP // address or connections per second. // The Redirect annotation precedes RateLimit @@ -245,13 +276,13 @@ type Location struct { // UsePortInRedirects indicates if redirects must specify the port // +optional UsePortInRedirects bool `json:"usePortInRedirects"` - // VtsFilterKey contains the vts filter key on the location level - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key - // +optional - VtsFilterKey string `json:"vtsFilterKey,omitempty"` // ConfigurationSnippet contains additional configuration for the backend // to be considered in the configuration of the location ConfigurationSnippet string `json:"configurationSnippet"` + // Connection contains the Connection header that overrides the default Connection header + // of the request + // +optional + Connection connection.Config `json:"connection"` // ClientBodyBufferSize allows for the configuration of the client body // buffer size for a specific location. // +optional @@ -259,6 +290,32 @@ type Location struct { // DefaultBackend allows the use of a custom default backend for this location.
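// Editor's sketch, not part of this patch: the upstream-hash-by annotations
// now land in a single struct instead of a bare string, which is what lets
// the subset options ride along. Values here are illustrative:
//
//	hashCfg := ingress.UpstreamHashByConfig{
//		UpstreamHashBy:           "$request_uri",
//		UpstreamHashBySubset:     true, // hash maps to a subset of endpoints
//		UpstreamHashBySubsetSize: 3,    // endpoints per subset
//	}
//	backend := &ingress.Backend{Name: "default-demo-80", UpstreamHashBy: hashCfg}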
// +optional DefaultBackend *apiv1.Service `json:"defaultBackend,omitempty"` + // DefaultBackendUpstreamName is the upstream-formatted string for the name of + // this location's custom default backend + DefaultBackendUpstreamName string `json:"defaultBackendUpstreamName,omitempty"` + // XForwardedPrefix allows adding the X-Forwarded-Prefix header to the request with the + // original location. + // +optional + XForwardedPrefix string `json:"xForwardedPrefix,omitempty"` + // Logs allows enabling or disabling the nginx logs. + // By default access logs are enabled and rewrite logs are disabled + Logs log.Config `json:"logs,omitempty"` + // LuaRestyWAF contains parameters to configure lua-resty-waf + LuaRestyWAF luarestywaf.Config `json:"luaRestyWAF"` + // InfluxDB allows monitoring incoming requests by sending them to an InfluxDB database + // +optional + InfluxDB influxdb.Config `json:"influxDB,omitempty"` + // BackendProtocol indicates which protocol should be used to communicate with the service. + // By default this is HTTP + BackendProtocol string `json:"backend-protocol"` + // CustomHTTPErrors specifies the error codes that should be intercepted. + // +optional + CustomHTTPErrors []int `json:"custom-http-errors"` + // ModSecurity allows enabling and configuring modsecurity + // +optional + ModSecurity modsecurity.Config `json:"modsecurity"` + // Satisfy dictates whether access requires any or all of the authentication restrictions to pass, + // mirroring the nginx satisfy directive + Satisfy string `json:"satisfy"` } // SSLPassthroughBackend describes an SSL upstream server configured @@ -266,7 +323,7 @@ type Location struct { // The endpoints must provide the TLS termination exposing the required SSL certificate. // The ingress controller only pipes the underlying TCP connection type SSLPassthroughBackend struct { - Service *apiv1.Service `json:"service,omitEmpty"` + Service *apiv1.Service `json:"service,omitempty"` Port intstr.IntOrString `json:"port"` // Backend describes the endpoints to use. Backend string `json:"namespace,omitempty"` @@ -281,7 +338,9 @@ type L4Service struct { // Backend of the service Backend L4Backend `json:"backend"` // Endpoints active endpoints of the service - Endpoints []Endpoint `json:"endpoins,omitEmpty"` + Endpoints []Endpoint `json:"endpoints,omitempty"` + // k8s Service + Service *apiv1.Service `json:"service,omitempty"` } // L4Backend describes the kubernetes service behind L4 Ingress service @@ -299,3 +358,14 @@ type ProxyProtocol struct { Decode bool `json:"decode"` Encode bool `json:"encode"` } + +// Ingress holds the definition of an Ingress plus its annotations +type Ingress struct { + networking.Ingress + ParsedAnnotations *annotations.Ingress +} + +// GeneralConfig holds the definition of lua general configuration data +type GeneralConfig struct { + ControllerPodsCount int `json:"controllerPodsCount"` +} diff --git a/internal/ingress/types_equals.go b/internal/ingress/types_equals.go index c86e33e7a4..d3ec2b2aba 100644 --- a/internal/ingress/types_equals.go +++ b/internal/ingress/types_equals.go @@ -16,6 +16,10 @@ limitations under the License.
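// Editor's sketch, not part of this patch: Location.Ingress now points at the
// package-local wrapper declared above rather than extensions.Ingress, so a
// rule carries its parsed annotations with it. Construction, assuming anns is
// an *annotations.Ingress produced by the annotation extractor:
//
//	ing := &ingress.Ingress{
//		Ingress:           netv1beta1Ingress, // a networking/v1beta1 Ingress
//		ParsedAnnotations: anns,
//	}
//	loc := &ingress.Location{Backend: "default-demo-80", Ingress: ing}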
package ingress +import ( + "k8s.io/ingress-nginx/internal/sets" +) + // Equal tests for equality between two Configuration types func (c1 *Configuration) Equal(c2 *Configuration) bool { if c1 == c2 { @@ -25,23 +29,11 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool { return false } - if len(c1.Backends) != len(c2.Backends) { + match := compareBackends(c1.Backends, c2.Backends) + if !match { return false } - for _, c1b := range c1.Backends { - found := false - for _, c2b := range c2.Backends { - if c1b.Equal(c2b) { - found = true - break - } - } - if !found { - return false - } - } - if len(c1.Servers) != len(c2.Servers) { return false } @@ -53,40 +45,16 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool { } } - if len(c1.TCPEndpoints) != len(c2.TCPEndpoints) { + match = compareL4Service(c1.TCPEndpoints, c2.TCPEndpoints) + if !match { return false } - for _, tcp1 := range c1.TCPEndpoints { - found := false - for _, tcp2 := range c2.TCPEndpoints { - if (&tcp1).Equal(&tcp2) { - found = true - break - } - } - if !found { - return false - } - } - - if len(c1.UDPEndpoints) != len(c2.UDPEndpoints) { + match = compareL4Service(c1.UDPEndpoints, c2.UDPEndpoints) + if !match { return false } - for _, udp1 := range c1.UDPEndpoints { - found := false - for _, udp2 := range c2.UDPEndpoints { - if (&udp1).Equal(&udp2) { - found = true - break - } - } - if !found { - return false - } - } - if len(c1.PassthroughBackends) != len(c2.PassthroughBackends) { return false } @@ -104,6 +72,14 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool { } } + if c1.BackendConfigChecksum != c2.BackendConfigChecksum { + return false + } + + if c1.ControllerPodsCount != c2.ControllerPodsCount { + return false + } + return true } @@ -118,6 +94,9 @@ func (b1 *Backend) Equal(b2 *Backend) bool { if b1.Name != b2.Name { return false } + if b1.NoServer != b2.NoServer { + return false + } if b1.Service != b2.Service { if b1.Service == nil || b2.Service == nil { @@ -129,17 +108,11 @@ func (b1 *Backend) Equal(b2 *Backend) bool { if b1.Service.GetName() != b2.Service.GetName() { return false } - if b1.Service.GetResourceVersion() != b2.Service.GetResourceVersion() { - return false - } } if b1.Port != b2.Port { return false } - if b1.Secure != b2.Secure { - return false - } if !(&b1.SecureCACert).Equal(&b2.SecureCACert) { return false } @@ -152,22 +125,22 @@ func (b1 *Backend) Equal(b2 *Backend) bool { if b1.UpstreamHashBy != b2.UpstreamHashBy { return false } + if b1.LoadBalancing != b2.LoadBalancing { + return false + } - if len(b1.Endpoints) != len(b2.Endpoints) { + match := compareEndpoints(b1.Endpoints, b2.Endpoints) + if !match { return false } - for _, udp1 := range b1.Endpoints { - found := false - for _, udp2 := range b2.Endpoints { - if (&udp1).Equal(&udp2) { - found = true - break - } - } - if !found { - return false - } + if !b1.TrafficShapingPolicy.Equal(b2.TrafficShapingPolicy) { + return false + } + + match = sets.StringElementsMatch(b1.AlternativeBackends, b2.AlternativeBackends) + if !match { + return false } return true @@ -202,7 +175,34 @@ func (csa1 *CookieSessionAffinity) Equal(csa2 *CookieSessionAffinity) bool { if csa1.Name != csa2.Name { return false } - if csa1.Hash != csa2.Hash { + if csa1.Path != csa2.Path { + return false + } + if csa1.Expires != csa2.Expires { + return false + } + if csa1.MaxAge != csa2.MaxAge { + return false + } + + return true +} + +// Equal checks for equality between two UpstreamHashByConfig types +func (u1 *UpstreamHashByConfig) Equal(u2 *UpstreamHashByConfig) bool { +
if u1 == u2 { + return true + } + if u1 == nil || u2 == nil { + return false + } + if u1.UpstreamHashBy != u2.UpstreamHashBy { + return false + } + if u1.UpstreamHashBySubset != u2.UpstreamHashBySubset { + return false + } + if u1.UpstreamHashBySubsetSize != u2.UpstreamHashBySubsetSize { return false } @@ -223,12 +223,6 @@ func (e1 *Endpoint) Equal(e2 *Endpoint) bool { if e1.Port != e2.Port { return false } - if e1.MaxFails != e2.MaxFails { - return false - } - if e1.FailTimeout != e2.FailTimeout { - return false - } if e1.Target != e2.Target { if e1.Target == nil || e2.Target == nil { @@ -245,6 +239,24 @@ func (e1 *Endpoint) Equal(e2 *Endpoint) bool { return true } +// Equal checks for equality between two TrafficShapingPolicies +func (tsp1 TrafficShapingPolicy) Equal(tsp2 TrafficShapingPolicy) bool { + if tsp1.Weight != tsp2.Weight { + return false + } + if tsp1.Header != tsp2.Header { + return false + } + if tsp1.HeaderValue != tsp2.HeaderValue { + return false + } + if tsp1.Cookie != tsp2.Cookie { + return false + } + + return true +} + // Equal tests for equality between two Server types func (s1 *Server) Equal(s2 *Server) bool { if s1 == s2 { @@ -256,25 +268,28 @@ func (s1 *Server) Equal(s2 *Server) bool { if s1.Hostname != s2.Hostname { return false } - if s1.Alias != s2.Alias { + if s1.SSLPassthrough != s2.SSLPassthrough { return false } - if s1.SSLPassthrough != s2.SSLPassthrough { + if !(&s1.SSLCert).Equal(&s2.SSLCert) { return false } - if s1.SSLCertificate != s2.SSLCertificate { + if s1.Alias != s2.Alias { return false } - if s1.SSLPemChecksum != s2.SSLPemChecksum { + if s1.RedirectFromToWWW != s2.RedirectFromToWWW { return false } if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) { return false } - if s1.SSLFullChainCertificate != s2.SSLFullChainCertificate { + if s1.ServerSnippet != s2.ServerSnippet { return false } - if s1.RedirectFromToWWW != s2.RedirectFromToWWW { + if s1.SSLCiphers != s2.SSLCiphers { + return false + } + if s1.AuthTLSError != s2.AuthTLSError { return false } @@ -320,9 +335,6 @@ func (l1 *Location) Equal(l2 *Location) bool { if l1.Service.GetName() != l2.Service.GetName() { return false } - if l1.Service.GetResourceVersion() != l2.Service.GetResourceVersion() { - return false - } } if l1.Port.StrVal != l2.Port.StrVal { @@ -340,6 +352,12 @@ func (l1 *Location) Equal(l2 *Location) bool { if !(&l1.ExternalAuth).Equal(&l2.ExternalAuth) { return false } + if l1.EnableGlobalAuth != l2.EnableGlobalAuth { + return false + } + if l1.HTTP2PushPreload != l2.HTTP2PushPreload { + return false + } if !(&l1.RateLimit).Equal(&l2.RateLimit) { return false } @@ -367,6 +385,43 @@ func (l1 *Location) Equal(l2 *Location) bool { if l1.UpstreamVhost != l2.UpstreamVhost { return false } + if l1.XForwardedPrefix != l2.XForwardedPrefix { + return false + } + if !(&l1.Connection).Equal(&l2.Connection) { + return false + } + if !(&l1.Logs).Equal(&l2.Logs) { + return false + } + if !(&l1.LuaRestyWAF).Equal(&l2.LuaRestyWAF) { + return false + } + + if !(&l1.InfluxDB).Equal(&l2.InfluxDB) { + return false + } + + if l1.BackendProtocol != l2.BackendProtocol { + return false + } + + match := compareInts(l1.CustomHTTPErrors, l2.CustomHTTPErrors) + if !match { + return false + } + + if !(&l1.ModSecurity).Equal(&l2.ModSecurity) { + return false + } + + if l1.Satisfy != l2.Satisfy { + return false + } + + if l1.DefaultBackendUpstreamName != l2.DefaultBackendUpstreamName { + return false + } return true } @@ -399,9 +454,6 @@ func (ptb1 *SSLPassthroughBackend) Equal(ptb2 
*SSLPassthroughBackend) bool { if ptb1.Service.GetName() != ptb2.Service.GetName() { return false } - if ptb1.Service.GetResourceVersion() != ptb2.Service.GetResourceVersion() { - return false - } } return true @@ -421,21 +473,10 @@ func (e1 *L4Service) Equal(e2 *L4Service) bool { if !(&e1.Backend).Equal(&e2.Backend) { return false } - if len(e1.Endpoints) != len(e2.Endpoints) { - return false - } - for _, ep1 := range e1.Endpoints { - found := false - for _, ep2 := range e2.Endpoints { - if (&ep1).Equal(&ep2) { - found = true - break - } - } - if !found { - return false - } + match := compareEndpoints(e1.Endpoints, e2.Endpoints) + if !match { + return false } return true @@ -465,7 +506,7 @@ func (l4b1 *L4Backend) Equal(l4b2 *L4Backend) bool { return true } -// Equal tests for equality between two L4Backend types +// Equal tests for equality between two SSLCert types func (s1 *SSLCert) Equal(s2 *SSLCert) bool { if s1 == s2 { return true @@ -482,19 +523,89 @@ func (s1 *SSLCert) Equal(s2 *SSLCert) bool { if !s1.ExpireTime.Equal(s2.ExpireTime) { return false } + if s1.FullChainPemFileName != s2.FullChainPemFileName { + return false + } + if s1.PemCertKey != s2.PemCertKey { + return false + } - for _, cn1 := range s1.CN { - found := false - for _, cn2 := range s2.CN { - if cn1 == cn2 { - found = true - break - } - } - if !found { - return false - } + match := sets.StringElementsMatch(s1.CN, s2.CN) + if !match { + return false } return true } + +var compareEndpointsFunc = func(e1, e2 interface{}) bool { + ep1, ok := e1.(Endpoint) + if !ok { + return false + } + + ep2, ok := e2.(Endpoint) + if !ok { + return false + } + + return (&ep1).Equal(&ep2) +} + +func compareEndpoints(a, b []Endpoint) bool { + return sets.Compare(a, b, compareEndpointsFunc) +} + +var compareBackendsFunc = func(e1, e2 interface{}) bool { + b1, ok := e1.(*Backend) + if !ok { + return false + } + + b2, ok := e2.(*Backend) + if !ok { + return false + } + + return b1.Equal(b2) +} + +func compareBackends(a, b []*Backend) bool { + return sets.Compare(a, b, compareBackendsFunc) +} + +var compareIntsFunc = func(e1, e2 interface{}) bool { + b1, ok := e1.(int) + if !ok { + return false + } + + b2, ok := e2.(int) + if !ok { + return false + } + + return b1 == b2 +} + +func compareInts(a, b []int) bool { + return sets.Compare(a, b, compareIntsFunc) +} + +var compareL4ServiceFunc = func(e1, e2 interface{}) bool { + b1, ok := e1.(L4Service) + if !ok { + return false + } + + b2, ok := e2.(L4Service) + if !ok { + return false + } + + return (&b1).Equal(&b2) +} + +func compareL4Service(a, b []L4Service) bool { + return sets.Compare(a, b, compareL4ServiceFunc) +} diff --git a/internal/ingress/zz_generated.deepcopy.go b/internal/ingress/zz_generated.deepcopy.go index 82e25588ab..c6f95123cb 100644 --- a/internal/ingress/zz_generated.deepcopy.go +++ b/internal/ingress/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,51 +16,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. 
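// Editor's note, hedged: every compare* helper above funnels into
// sets.Compare, which, judging from the tables in type_equals_test.go,
// treats two slices as equal when they have the same length and the
// elements match regardless of order:
//
//	compareInts([]int{1, 2, 3}, []int{3, 2, 1}) // true
//	compareInts([]int{}, nil)                   // false, per the test table
//
// The interface{} assertions in the comparator functions mean a mismatched
// dynamic type simply makes two elements unequal rather than panicking.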
package ingress import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Backend).DeepCopyInto(out.(*Backend)) - return nil - }, InType: reflect.TypeOf(&Backend{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CookieSessionAffinity).DeepCopyInto(out.(*CookieSessionAffinity)) - return nil - }, InType: reflect.TypeOf(&CookieSessionAffinity{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoint).DeepCopyInto(out.(*Endpoint)) - return nil - }, InType: reflect.TypeOf(&Endpoint{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backend) DeepCopyInto(out *Backend) { *out = *in if in.Service != nil { in, out := &in.Service, &out.Service - if *in == nil { - *out = nil - } else { - *out = new(v1.Service) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Service) + (*in).DeepCopyInto(*out) } out.Port = in.Port out.SecureCACert = in.SecureCACert @@ -72,6 +42,13 @@ func (in *Backend) DeepCopyInto(out *Backend) { } } in.SessionAffinity.DeepCopyInto(&out.SessionAffinity) + out.UpstreamHashBy = in.UpstreamHashBy + out.TrafficShapingPolicy = in.TrafficShapingPolicy + if in.AlternativeBackends != nil { + in, out := &in.AlternativeBackends, &out.AlternativeBackends + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -92,12 +69,15 @@ func (in *CookieSessionAffinity) DeepCopyInto(out *CookieSessionAffinity) { in, out := &in.Locations, &out.Locations *out = make(map[string][]string, len(*in)) for key, val := range *in { + var outVal []string if val == nil { (*out)[key] = nil } else { - (*out)[key] = make([]string, len(val)) - copy((*out)[key], val) + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) } + (*out)[key] = outVal } } return @@ -118,12 +98,8 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in if in.Target != nil { in, out := &in.Target, &out.Target - if *in == nil { - *out = nil - } else { - *out = new(v1.ObjectReference) - **out = **in - } + *out = new(v1.ObjectReference) + **out = **in } return } @@ -154,3 +130,19 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficShapingPolicy) DeepCopyInto(out *TrafficShapingPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficShapingPolicy. 
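// Editor's sketch, not part of this patch: the regenerated deep-copy code
// adds value-safe copies for the new type:
//
//	in := &ingress.TrafficShapingPolicy{Weight: 20, Header: "X-Canary"}
//	out := in.DeepCopy() // mutating out does not touch in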
+func (in *TrafficShapingPolicy) DeepCopy() *TrafficShapingPolicy { + if in == nil { + return nil + } + out := new(TrafficShapingPolicy) + in.DeepCopyInto(out) + return out +} diff --git a/internal/k8s/main.go b/internal/k8s/main.go index b391cad690..4ebba5f340 100644 --- a/internal/k8s/main.go +++ b/internal/k8s/main.go @@ -21,9 +21,13 @@ import ( "os" "strings" + "k8s.io/klog" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" ) // ParseNameNS parses a string searching a namespace and name @@ -40,6 +44,7 @@ func ParseNameNS(input string) (string, string, error) { func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP bool) string { node, err := kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { + klog.Errorf("Error getting node %v: %v", name, err) return "" } @@ -51,12 +56,12 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP } } } - } else { - for _, address := range node.Status.Addresses { - if address.Type == apiv1.NodeExternalIP { - if address.Address != "" { - return address.Address - } + } + + for _, address := range node.Status.Addresses { + if address.Type == apiv1.NodeExternalIP { + if address.Address != "" { + return address.Address } } } @@ -68,7 +73,6 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP type PodInfo struct { Name string Namespace string - NodeIP string // Labels selectors of the running pod // This is used to search for other Ingress controller pods Labels map[string]string @@ -92,7 +96,44 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) { return &PodInfo{ Name: podName, Namespace: podNs, - NodeIP: GetNodeIPOrName(kubeClient, pod.Spec.NodeName, true), Labels: pod.GetLabels(), }, nil } + +// MetaNamespaceKey knows how to make keys for API objects which implement meta.Interface. 
+func MetaNamespaceKey(obj interface{}) string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + klog.Warning(err) + } + + return key +} + +// IsNetworkingIngressAvailable indicates if package "k8s.io/api/networking/v1beta1" is available or not +var IsNetworkingIngressAvailable bool + +// NetworkingIngressAvailable checks if the package "k8s.io/api/networking/v1beta1" is available or not +func NetworkingIngressAvailable(client clientset.Interface) bool { + // check the Kubernetes version to decide whether to use the new ingress package + version114, err := version.ParseGeneric("v1.14.0") + if err != nil { + klog.Errorf("unexpected error parsing version: %v", err) + return false + } + + serverVersion, err := client.Discovery().ServerVersion() + if err != nil { + klog.Errorf("unexpected error obtaining Kubernetes version: %v", err) + return false + } + + klog.V(3).Infof("running Kubernetes version: %v", serverVersion) + runningVersion, err := version.ParseGeneric(serverVersion.String()) + if err != nil { + klog.Errorf("unexpected error parsing running Kubernetes version: %v", err) + return false + } + + return runningVersion.AtLeast(version114) +} diff --git a/internal/k8s/main_test.go b/internal/k8s/main_test.go index f724f5cee0..7283ebc5e9 100644 --- a/internal/k8s/main_test.go +++ b/internal/k8s/main_test.go @@ -81,7 +81,7 @@ func TestGetNodeIP(t *testing.T) { }, }}}), "notexistnode", "", true}, - // node exist + // node exists {testclient.NewSimpleClientset(&apiv1.NodeList{Items: []apiv1.Node{{ ObjectMeta: metav1.ObjectMeta{ Name: "demo", diff --git a/internal/net/dns/dns.go index 1249f79102..1edd64cf98 100644 --- a/internal/net/dns/dns.go +++ b/internal/net/dns/dns.go @@ -21,7 +21,7 @@ import ( "net" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) var defResolvConf = "/etc/resolv.conf" @@ -53,6 +53,6 @@ func GetSystemNameServers() ([]net.IP, error) { } } - glog.V(3).Infof("nameservers IP address/es to use: %v", nameservers) + klog.V(3).Infof("nameservers IP address/es to use: %v", nameservers) return nameservers, nil } diff --git a/internal/net/dns/dns_test.go b/internal/net/dns/dns_test.go index 979d65c32e..bd2243ae75 100644 --- a/internal/net/dns/dns_test.go +++ b/internal/net/dns/dns_test.go @@ -21,6 +21,8 @@ import ( "net" "os" "testing" + + "k8s.io/ingress-nginx/internal/file" ) func TestGetDNSServers(t *testing.T) { @@ -32,22 +34,22 @@ func TestGetDNSServers(t *testing.T) { t.Error("expected at least 1 nameserver in /etc/resolv.conf") } - file, err := ioutil.TempFile("", "fw") + f, err := ioutil.TempFile("", "fw") if err != nil { t.Fatalf("unexpected error: %v", err) } - defer file.Close() - defer os.Remove(file.Name()) + defer f.Close() + defer os.Remove(f.Name()) - ioutil.WriteFile(file.Name(), []byte(` + ioutil.WriteFile(f.Name(), []byte(` # comment ; comment nameserver 2001:4860:4860::8844 nameserver 2001:4860:4860::8888 nameserver 8.8.8.8 - `), 0644) + `), file.ReadWriteByUser) - defResolvConf = file.Name() + defResolvConf = f.Name() s, err = GetSystemNameServers() if err != nil { t.Fatalf("unexpected error reading /etc/resolv.conf file: %v", err) } diff --git a/internal/net/net.go index 8547e3dbda..b7fefb8b3b 100644 --- a/internal/net/net.go +++ b/internal/net/net.go @@ -24,21 +24,38 @@ import ( // IsIPV6 checks if the input contains a valid IPV6 address func IsIPV6(ip _net.IP) bool { - return ip.To4() == nil + return ip != nil && ip.To4() == nil } // IsPortAvailable checks if a TCP port is available or not func IsPortAvailable(p int)
bool { - ln, err := _net.Listen("tcp", fmt.Sprintf(":%v", p)) + conn, err := _net.Dial("tcp", fmt.Sprintf(":%v", p)) if err != nil { - return false + return true } - ln.Close() - return true + defer conn.Close() + return false } -// IsIPv6Enabled checks if IPV6 is enabled or not +// IsIPv6Enabled checks if IPV6 is enabled or not and we have +// at least one configured in the pod func IsIPv6Enabled() bool { cmd := exec.Command("test", "-f", "/proc/net/if_inet6") - return cmd.Run() == nil + if cmd.Run() != nil { + return false + } + + addrs, err := _net.InterfaceAddrs() + if err != nil { + return false + } + + for _, addr := range addrs { + ip, _, _ := _net.ParseCIDR(addr.String()) + if IsIPV6(ip) { + return true + } + } + + return false } diff --git a/internal/net/net_test.go b/internal/net/net_test.go index 6ab4223caa..f2f37a8386 100644 --- a/internal/net/net_test.go +++ b/internal/net/net_test.go @@ -58,3 +58,13 @@ func TestIsPortAvailable(t *testing.T) { t.Fatalf("expected port %v to not be available", p) } } + +/* +// TODO: this test should be optional or running behind a flag +func TestIsIPv6Enabled(t *testing.T) { + isEnabled := IsIPv6Enabled() + if !isEnabled { + t.Fatalf("expected IPV6 be enabled") + } +} +*/ diff --git a/internal/net/ssl/ssl.go b/internal/net/ssl/ssl.go index 5efc075039..e5b2318ed0 100644 --- a/internal/net/ssl/ssl.go +++ b/internal/net/ssl/ssl.go @@ -17,6 +17,7 @@ limitations under the License. package ssl import ( + "bytes" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -26,83 +27,74 @@ import ( "encoding/pem" "errors" "fmt" - "io/ioutil" "math/big" "net" - "os" "strconv" + "strings" + "sync" "time" - "github.com/golang/glog" "github.com/zakjan/cert-chain-resolver/certUtil" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/watch" + "k8s.io/klog" ) var ( oidExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} ) -// AddOrUpdateCertAndKey creates a .pem file wth the cert and the key with the specified name -func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, error) { - pemName := fmt.Sprintf("%v.pem", name) - pemFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, pemName) - tempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName) +const ( + fakeCertificateName = "default-fake-certificate" +) - if err != nil { - return nil, fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) - } - glog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName) +// getPemFileName returns absolute file path and file name of pem cert related to given fullSecretName +func getPemFileName(fullSecretName string) (string, string) { + pemName := fmt.Sprintf("%v.pem", fullSecretName) + return fmt.Sprintf("%v/%v", file.DefaultSSLDirectory, pemName), pemName +} - _, err = tempPemFile.Write(cert) - if err != nil { - return nil, fmt.Errorf("could not write to pem file %v: %v", tempPemFile.Name(), err) - } - _, err = tempPemFile.Write([]byte("\n")) - if err != nil { - return nil, fmt.Errorf("could not write to pem file %v: %v", tempPemFile.Name(), err) - } - _, err = tempPemFile.Write(key) - if err != nil { - return nil, fmt.Errorf("could not write to pem file %v: %v", tempPemFile.Name(), err) +func verifyPemCertAgainstRootCA(pemCert *x509.Certificate, ca []byte) error { + bundle := x509.NewCertPool() + bundle.AppendCertsFromPEM(ca) + opts := x509.VerifyOptions{ + Roots: bundle, } - 
err = tempPemFile.Close() + _, err := pemCert.Verify(opts) if err != nil { - return nil, fmt.Errorf("could not close temp pem file %v: %v", tempPemFile.Name(), err) + return err } - pemCerts, err := ioutil.ReadFile(tempPemFile.Name()) - if err != nil { - _ = os.Remove(tempPemFile.Name()) - return nil, err - } + return nil +} - pemBlock, _ := pem.Decode(pemCerts) +// CreateSSLCert validates cert and key, extracts common names and returns the corresponding SSLCert object +func CreateSSLCert(cert, key []byte) (*ingress.SSLCert, error) { + var pemCertBuffer bytes.Buffer + + pemCertBuffer.Write(cert) + pemCertBuffer.Write([]byte("\n")) + pemCertBuffer.Write(key) + + pemBlock, _ := pem.Decode(pemCertBuffer.Bytes()) if pemBlock == nil { - _ = os.Remove(tempPemFile.Name()) return nil, fmt.Errorf("no valid PEM formatted block found") } - // If the file does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used. if pemBlock.Type != "CERTIFICATE" { - _ = os.Remove(tempPemFile.Name()) - return nil, fmt.Errorf("certificate %v contains invalid data, and must be created with 'kubectl create secret tls'", name) + return nil, fmt.Errorf("no certificate PEM data found, make sure certificate content starts with 'BEGIN CERTIFICATE'") } pemCert, err := x509.ParseCertificate(pemBlock.Bytes) if err != nil { - _ = os.Remove(tempPemFile.Name()) return nil, err } - //Ensure that certificate and private key have a matching public key if _, err := tls.X509KeyPair(cert, key); err != nil { - _ = os.Remove(tempPemFile.Name()) - return nil, err + return nil, fmt.Errorf("certificate and private key do not have a matching public key: %v", err) } cn := sets.NewString(pemCert.Subject.CommonName) @@ -113,11 +105,11 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, } if len(pemCert.Extensions) > 0 { - glog.V(3).Info("parsing ssl certificate extensions") + klog.V(3).Info("parsing ssl certificate extensions") for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { dns, _, _, err := parseSANExtension(ext.Value) if err != nil { - glog.Warningf("unexpected error parsing certificate extensions: %v", err) + klog.Warningf("unexpected error parsing certificate extensions: %v", err) continue } @@ -129,56 +121,128 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, } } - err = os.Rename(tempPemFile.Name(), pemFileName) + return &ingress.SSLCert{ + Certificate: pemCert, + CN: cn.List(), + ExpireTime: pemCert.NotAfter, + PemCertKey: pemCertBuffer.String(), + }, nil +} + +// CreateCACert is similar to CreateSSLCert but it creates an instance of SSLCert based only on the given ca, after +// parsing and validating it +func CreateCACert(ca []byte) (*ingress.SSLCert, error) { + pemCABlock, _ := pem.Decode(ca) + if pemCABlock == nil { + return nil, fmt.Errorf("no valid PEM formatted block found") + } + // If the first certificate does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used.
+ if pemCABlock.Type != "CERTIFICATE" { + return nil, fmt.Errorf("no certificate PEM data found, make sure certificate content starts with 'BEGIN CERTIFICATE'") + } + + pemCert, err := x509.ParseCertificate(pemCABlock.Bytes) if err != nil { - return nil, fmt.Errorf("could not move temp pem file %v to destination %v: %v", tempPemFile.Name(), pemFileName, err) + return nil, err } - if len(ca) > 0 { - bundle := x509.NewCertPool() - bundle.AppendCertsFromPEM(ca) - opts := x509.VerifyOptions{ - Roots: bundle, - } + return &ingress.SSLCert{ + Certificate: pemCert, + }, nil +} - _, err := pemCert.Verify(opts) - if err != nil { - oe := fmt.Sprintf("failed to verify certificate chain: \n\t%s\n", err) - return nil, errors.New(oe) - } +// StoreSSLCertOnDisk creates a .pem file with content PemCertKey from the given sslCert +// and sets relevant remaining fields of sslCert object +func StoreSSLCertOnDisk(fs file.Filesystem, name string, sslCert *ingress.SSLCert) error { + pemFileName, _ := getPemFileName(name) - caFile, err := os.OpenFile(pemFileName, os.O_RDWR|os.O_APPEND, 0600) - if err != nil { - return nil, fmt.Errorf("could not open file %v for writing additional CA chains: %v", pemFileName, err) - } + pemFile, err := fs.Create(pemFileName) + if err != nil { + return fmt.Errorf("could not create PEM certificate file %v: %v", pemFileName, err) + } + defer pemFile.Close() - defer caFile.Close() - _, err = caFile.Write([]byte("\n")) - if err != nil { - return nil, fmt.Errorf("could not append CA to cert file %v: %v", pemFileName, err) - } - caFile.Write(ca) - caFile.Write([]byte("\n")) + _, err = pemFile.Write([]byte(sslCert.PemCertKey)) + if err != nil { + return fmt.Errorf("could not write data to PEM file %v: %v", pemFileName, err) + } + + sslCert.PemFileName = pemFileName + sslCert.PemSHA = file.SHA1(pemFileName) + + return nil +} + +func isSSLCertStoredOnDisk(sslCert *ingress.SSLCert) bool { + return len(sslCert.PemFileName) > 0 +} - return &ingress.SSLCert{ - Certificate: pemCert, - CAFileName: pemFileName, - PemFileName: pemFileName, - PemSHA: file.SHA1(pemFileName), - CN: cn.List(), - ExpireTime: pemCert.NotAfter, - }, nil +// ConfigureCACertWithCertAndKey appends ca into existing PEM file consisting of cert and key +// and sets relevant fields in sslCert object +func ConfigureCACertWithCertAndKey(fs file.Filesystem, name string, ca []byte, sslCert *ingress.SSLCert) error { + err := verifyPemCertAgainstRootCA(sslCert.Certificate, ca) + if err != nil { + oe := fmt.Sprintf("failed to verify certificate chain: \n\t%s\n", err) + return errors.New(oe) } - s := &ingress.SSLCert{ - Certificate: pemCert, - PemFileName: pemFileName, - PemSHA: file.SHA1(pemFileName), - CN: cn.List(), - ExpireTime: pemCert.NotAfter, + certAndKey, err := fs.ReadFile(sslCert.PemFileName) + if err != nil { + return fmt.Errorf("could not read file %v for writing additional CA chains: %v", sslCert.PemFileName, err) + } + + f, err := fs.Create(sslCert.PemFileName) + if err != nil { + return fmt.Errorf("could not create PEM file %v: %v", sslCert.PemFileName, err) + } + defer f.Close() + + _, err = f.Write(certAndKey) + if err != nil { + return fmt.Errorf("could not write cert and key bundle to cert file %v: %v", sslCert.PemFileName, err) + } + + _, err = f.Write([]byte("\n")) + if err != nil { + return fmt.Errorf("could not append newline to cert file %v: %v", sslCert.PemFileName, err) + } + + _, err = f.Write(ca) + if err != nil { + return fmt.Errorf("could not write ca data to cert file %v: %v", sslCert.PemFileName, err) } 
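// Editor's sketch, not part of this patch: the deleted AddOrUpdateCertAndKey
// monolith becomes three composable steps. A typical flow, with a
// hypothetical secret name and PEM byte slices:
//
//	sslCert, err := ssl.CreateSSLCert(certPEM, keyPEM) // validate, extract CNs; no disk I/O
//	if err != nil {
//		return err
//	}
//	if err := ssl.StoreSSLCertOnDisk(fs, "default-tls-secret", sslCert); err != nil {
//		return err
//	}
//	if len(caPEM) > 0 {
//		// appends the CA to the same PEM file and recalculates the SHA
//		if err := ssl.ConfigureCACertWithCertAndKey(fs, "default-tls-secret", caPEM, sslCert); err != nil {
//			return err
//		}
//	}
//
// Keeping validation off the filesystem is what lets the tests further below
// run against file.NewFakeFS.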
- return s, nil + sslCert.CAFileName = sslCert.PemFileName + // since we updated sslCert.PemFileName we need to recalculate the checksum + sslCert.PemSHA = file.SHA1(sslCert.PemFileName) + + return nil +} + +// ConfigureCACert is similar to ConfigureCACertWithCertAndKey but it creates a separate file +// for CA cert and writes only ca into it and then sets relevant fields in sslCert +func ConfigureCACert(fs file.Filesystem, name string, ca []byte, sslCert *ingress.SSLCert) error { + caName := fmt.Sprintf("ca-%v.pem", name) + fileName := fmt.Sprintf("%v/%v", file.DefaultSSLDirectory, caName) + + f, err := fs.Create(fileName) + if err != nil { + return fmt.Errorf("could not write CA file %v: %v", fileName, err) + } + defer f.Close() + + _, err = f.Write(ca) + if err != nil { + return fmt.Errorf("could not write CA file %v: %v", fileName, err) + } + + sslCert.PemFileName = fileName + sslCert.CAFileName = fileName + sslCert.PemSHA = file.SHA1(fileName) + + klog.V(3).Infof("Created CA Certificate for Authentication: %v", fileName) + + return nil } func getExtension(c *x509.Certificate, id asn1.ObjectIdentifier) []pkix.Extension { @@ -247,48 +311,13 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre return } -// AddCertAuth creates a .pem file with the specified CAs to be used in Cert Authentication -// If it's already exists, it's clobbered. -func AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) { - - caName := fmt.Sprintf("ca-%v.pem", name) - caFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, caName) - - pemCABlock, _ := pem.Decode(ca) - if pemCABlock == nil { - return nil, fmt.Errorf("no valid PEM formatted block found") - } - // If the first certificate does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used. 
- if pemCABlock.Type != "CERTIFICATE" { - return nil, fmt.Errorf("CA file %v contains invalid data, and must be created only with PEM formated certificates", name) - } - - _, err := x509.ParseCertificate(pemCABlock.Bytes) - if err != nil { - return nil, err - } - - err = ioutil.WriteFile(caFileName, ca, 0644) - if err != nil { - return nil, fmt.Errorf("could not write CA file %v: %v", caFileName, err) - } - - glog.V(3).Infof("Created CA Certificate for Authentication: %v", caFileName) - return &ingress.SSLCert{ - CAFileName: caFileName, - PemFileName: caFileName, - PemSHA: file.SHA1(caFileName), - }, nil -} - // AddOrUpdateDHParam creates a dh parameters file with the specified name -func AddOrUpdateDHParam(name string, dh []byte) (string, error) { - pemName := fmt.Sprintf("%v.pem", name) - pemFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, pemName) +func AddOrUpdateDHParam(name string, dh []byte, fs file.Filesystem) (string, error) { + pemFileName, pemName := getPemFileName(name) - tempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName) + tempPemFile, err := fs.TempFile(file.DefaultSSLDirectory, pemName) - glog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) + klog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) if err != nil { return "", fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) } @@ -303,25 +332,24 @@ func AddOrUpdateDHParam(name string, dh []byte) (string, error) { return "", fmt.Errorf("could not close temp pem file %v: %v", tempPemFile.Name(), err) } - pemCerts, err := ioutil.ReadFile(tempPemFile.Name()) + defer fs.RemoveAll(tempPemFile.Name()) + + pemCerts, err := fs.ReadFile(tempPemFile.Name()) if err != nil { - _ = os.Remove(tempPemFile.Name()) return "", err } pemBlock, _ := pem.Decode(pemCerts) if pemBlock == nil { - _ = os.Remove(tempPemFile.Name()) return "", fmt.Errorf("no valid PEM formatted block found") } // If the file does not start with 'BEGIN DH PARAMETERS' it's invalid and must not be used. 
if pemBlock.Type != "DH PARAMETERS" { - _ = os.Remove(tempPemFile.Name()) return "", fmt.Errorf("certificate %v contains invalid data", name) } - err = os.Rename(tempPemFile.Name(), pemFileName) + err = fs.Rename(tempPemFile.Name(), pemFileName) if err != nil { return "", fmt.Errorf("could not move temp pem file %v to destination %v: %v", tempPemFile.Name(), pemFileName, err) } @@ -331,7 +359,23 @@ func AddOrUpdateDHParam(name string, dh []byte) (string, error) { // GetFakeSSLCert creates a Self Signed Certificate // Based in the code https://golang.org/src/crypto/tls/generate_cert.go -func GetFakeSSLCert() ([]byte, []byte) { +func GetFakeSSLCert(fs file.Filesystem) *ingress.SSLCert { + cert, key := getFakeHostSSLCert("ingress.local") + + sslCert, err := CreateSSLCert(cert, key) + if err != nil { + klog.Fatalf("unexpected error creating fake SSL Cert: %v", err) + } + + err = StoreSSLCertOnDisk(fs, fakeCertificateName, sslCert) + if err != nil { + klog.Fatalf("unexpected error storing fake SSL Cert: %v", err) + } + + return sslCert +} + +func getFakeHostSSLCert(host string) ([]byte, []byte) { var priv interface{} var err error @@ -339,7 +383,7 @@ func GetFakeSSLCert() ([]byte, []byte) { priv, err = rsa.GenerateKey(rand.Reader, 2048) if err != nil { - glog.Fatalf("failed to generate fake private key: %s", err) + klog.Fatalf("failed to generate fake private key: %v", err) } notBefore := time.Now() @@ -350,7 +394,7 @@ func GetFakeSSLCert() ([]byte, []byte) { serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - glog.Fatalf("failed to generate fake serial number: %s", err) + klog.Fatalf("failed to generate fake serial number: %v", err) } template := x509.Certificate{ @@ -365,11 +409,11 @@ func GetFakeSSLCert() ([]byte, []byte) { KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, - DNSNames: []string{"ingress.local"}, + DNSNames: []string{host}, } derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv) if err != nil { - glog.Fatalf("Failed to create fake certificate: %s", err) + klog.Fatalf("Failed to create fake certificate: %v", err) } cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) @@ -382,13 +426,8 @@ func GetFakeSSLCert() ([]byte, []byte) { // FullChainCert checks if a certificate file contains issues in the intermediate CA chain // Returns a new certificate with the intermediate certificates. 
// If the certificate does not contain issues with the chain it returns an empty byte array -func FullChainCert(in string) ([]byte, error) { - inputFile, err := os.Open(in) - if err != nil { - return nil, err - } - - data, err := ioutil.ReadAll(inputFile) +func FullChainCert(in string, fs file.Filesystem) ([]byte, error) { + data, err := fs.ReadFile(in) if err != nil { return nil, err } @@ -420,3 +459,82 @@ func FullChainCert(in string) ([]byte, error) { return certUtil.EncodeCertificates(certs), nil } + +// IsValidHostname checks if a hostname matches one of a list of common names +func IsValidHostname(hostname string, commonNames []string) bool { + for _, cn := range commonNames { + if strings.EqualFold(hostname, cn) { + return true + } + + labels := strings.Split(hostname, ".") + labels[0] = "*" + candidate := strings.Join(labels, ".") + if strings.EqualFold(candidate, cn) { + return true + } + } + + return false +} + +// TLSListener implements a dynamic certificate loader +type TLSListener struct { + certificatePath string + keyPath string + fs file.Filesystem + certificate *tls.Certificate + err error + lock sync.Mutex +} + +// NewTLSListener watches for changes to the certificate and key paths +// and reloads them whenever they change +func NewTLSListener(certificate, key string) *TLSListener { + fs, err := file.NewLocalFS() + if err != nil { + panic(fmt.Sprintf("failed to instantiate filesystem: %v", err)) + } + l := TLSListener{ + certificatePath: certificate, + keyPath: key, + fs: fs, + lock: sync.Mutex{}, + } + l.load() + watch.NewFileWatcher(certificate, l.load) + watch.NewFileWatcher(key, l.load) + return &l +} + +// GetCertificate implements the tls.Config.GetCertificate interface +func (tl *TLSListener) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) { + tl.lock.Lock() + defer tl.lock.Unlock() + return tl.certificate, tl.err +} + +// TLSConfig instantiates a TLS configuration, always providing an up-to-date certificate +func (tl *TLSListener) TLSConfig() *tls.Config { + return &tls.Config{ + GetCertificate: tl.GetCertificate, + } +} + +// load holds the lock for the whole reload and returns early on the first error, so readers never observe a half-updated pair +func (tl *TLSListener) load() { + klog.Infof("loading tls certificate from certificate path %s and key path %s", tl.certificatePath, tl.keyPath) + tl.lock.Lock() + defer tl.lock.Unlock() + certBytes, err := tl.fs.ReadFile(tl.certificatePath) + if err != nil { + tl.certificate, tl.err = nil, err + return + } + keyBytes, err := tl.fs.ReadFile(tl.keyPath) + if err != nil { + tl.certificate, tl.err = nil, err + return + } + cert, err := tls.X509KeyPair(certBytes, keyBytes) + tl.certificate, tl.err = &cert, err +} diff --git a/internal/net/ssl/ssl_test.go b/internal/net/ssl/ssl_test.go index 95767eeca9..e2ca147859 100644 --- a/internal/net/ssl/ssl_test.go +++ b/internal/net/ssl/ssl_test.go @@ -17,26 +17,40 @@ limitations under the License.
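// Editor's note, hedged: IsValidHostname matches exactly or by substituting
// the first label with "*", i.e. one level of wildcard:
//
//	ssl.IsValidHostname("foo.bar.com", []string{"*.bar.com"})   // true
//	ssl.IsValidHostname("a.b.bar.com", []string{"*.bar.com"})   // false: only the first label is replaced
//	ssl.IsValidHostname("FOO.BAR.COM", []string{"foo.bar.com"}) // true: comparisons use strings.EqualFold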
package ssl import ( + "bytes" + "crypto" + "crypto/rand" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/tls" "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" "fmt" - "io/ioutil" + "math" + "math/big" + "net/http" + "net/http/httptest" + "strings" + "sync" "testing" "time" certutil "k8s.io/client-go/util/cert" - "k8s.io/client-go/util/cert/triple" + "k8s.io/kubernetes/pkg/util/filesystem" - "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/file" ) // generateRSACerts generates a self signed certificate using a self generated ca -func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { - ca, err := triple.NewCA("self-sign-ca") +func generateRSACerts(host string) (*keyPair, *keyPair, error) { + ca, err := newCA("self-sign-ca") if err != nil { return nil, nil, err } - key, err := certutil.NewPrivateKey() + key, err := newPrivateKey() if err != nil { return nil, nil, fmt.Errorf("unable to create a server private key: %v", err) } @@ -45,23 +59,19 @@ func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { CommonName: host, Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, } - cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key) + cert, err := newSignedCert(config, key, ca.Cert, ca.Key) if err != nil { return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err) } - return &triple.KeyPair{ + return &keyPair{ Key: key, Cert: cert, }, ca, nil } -func TestAddOrUpdateCertAndKey(t *testing.T) { - td, err := ioutil.TempDir("", "ssl") - if err != nil { - t.Fatalf("Unexpected error creating temporal directory: %v", err) - } - ingress.DefaultSSLDirectory = td +func TestStoreSSLCertOnDisk(t *testing.T) { + fs := newFS(t) cert, _, err := generateRSACerts("echoheaders") if err != nil { @@ -70,33 +80,34 @@ func TestAddOrUpdateCertAndKey(t *testing.T) { name := fmt.Sprintf("test-%v", time.Now().UnixNano()) - c := certutil.EncodeCertPEM(cert.Cert) - k := certutil.EncodePrivateKeyPEM(cert.Key) + c := encodeCertPEM(cert.Cert) + k := encodePrivateKeyPEM(cert.Key) - ngxCert, err := AddOrUpdateCertAndKey(name, c, k, []byte{}) + sslCert, err := CreateSSLCert(c, k) if err != nil { - t.Fatalf("unexpected error checking SSL certificate: %v", err) + t.Fatalf("unexpected error creating SSL certificate: %v", err) } - if ngxCert.PemFileName == "" { + err = StoreSSLCertOnDisk(fs, name, sslCert) + if err != nil { + t.Fatalf("unexpected error storing SSL certificate: %v", err) + } + + if sslCert.PemFileName == "" { t.Fatalf("expected path to pem file but returned empty") } - if len(ngxCert.CN) == 0 { + if len(sslCert.CN) == 0 { t.Fatalf("expected at least one cname but none returned") } - if ngxCert.CN[0] != "echoheaders" { - t.Fatalf("expected cname echoheaders but %v returned", ngxCert.CN[0]) + if sslCert.CN[0] != "echoheaders" { + t.Fatalf("expected cname echoheaders but %v returned", sslCert.CN[0]) } } func TestCACert(t *testing.T) { - td, err := ioutil.TempDir("", "ssl") - if err != nil { - t.Fatalf("Unexpected error creating temporal directory: %v", err) - } - ingress.DefaultSSLDirectory = td + fs := newFS(t) cert, CA, err := generateRSACerts("echoheaders") if err != nil { @@ -105,47 +116,343 @@ func TestCACert(t *testing.T) { name := fmt.Sprintf("test-%v", time.Now().UnixNano()) - c := certutil.EncodeCertPEM(cert.Cert) - k := certutil.EncodePrivateKeyPEM(cert.Key) - ca := certutil.EncodeCertPEM(CA.Cert) + c := encodeCertPEM(cert.Cert) + k := encodePrivateKeyPEM(cert.Key) + ca := encodeCertPEM(CA.Cert) - 
ngxCert, err := AddOrUpdateCertAndKey(name, c, k, ca) + sslCert, err := CreateSSLCert(c, k) if err != nil { - t.Fatalf("unexpected error checking SSL certificate: %v", err) + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + + err = StoreSSLCertOnDisk(fs, name, sslCert) + if err != nil { + t.Fatalf("unexpected error storing SSL certificate: %v", err) + } + + if sslCert.CAFileName != "" { + t.Fatalf("expected CA file name to be empty") } - if ngxCert.CAFileName == "" { + + err = ConfigureCACertWithCertAndKey(fs, name, ca, sslCert) + if err != nil { + t.Fatalf("unexpected error configuring CA certificate: %v", err) + } + + if sslCert.CAFileName == "" { t.Fatalf("expected a valid CA file name") } } func TestGetFakeSSLCert(t *testing.T) { - k, c := GetFakeSSLCert() - if len(k) == 0 { - t.Fatalf("expected a valid key") + fs := newFS(t) + + sslCert := GetFakeSSLCert(fs) + + if len(sslCert.PemCertKey) == 0 { + t.Fatalf("expected PemCertKey to not be empty") } - if len(c) == 0 { - t.Fatalf("expected a valid certificate") + + if len(sslCert.PemFileName) == 0 { + t.Fatalf("expected PemFileName to not be empty") } -} -func TestAddCertAuth(t *testing.T) { - td, err := ioutil.TempDir("", "ssl") - if err != nil { - t.Fatalf("Unexpected error creating temporal directory: %v", err) + if len(sslCert.CN) != 2 { + t.Fatalf("expected 2 entries in CN, but got %v", len(sslCert.CN)) + } + + if sslCert.CN[0] != "Kubernetes Ingress Controller Fake Certificate" { + t.Fatalf("expected common name to be \"Kubernetes Ingress Controller Fake Certificate\" but got %v", sslCert.CN[0]) } - ingress.DefaultSSLDirectory = td + + if sslCert.CN[1] != "ingress.local" { + t.Fatalf("expected a DNS name \"ingress.local\" but got: %v", sslCert.CN[1]) + } +} + +func TestConfigureCACert(t *testing.T) { + fs := newFS(t) cn := "demo-ca" _, ca, err := generateRSACerts(cn) if err != nil { t.Fatalf("unexpected error creating SSL certificate: %v", err) } - c := certutil.EncodeCertPEM(ca.Cert) - ic, err := AddCertAuth(cn, c) + c := encodeCertPEM(ca.Cert) + + sslCert, err := CreateCACert(c) + if err != nil { + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + if sslCert.CAFileName != "" { + t.Fatalf("expected CAFileName to be empty") + } + if sslCert.Certificate == nil { + t.Fatalf("expected Certificate to be set") + } + + err = ConfigureCACert(fs, cn, c, sslCert) if err != nil { t.Fatalf("unexpected error creating SSL certificate: %v", err) } - if ic.CAFileName == "" { + if sslCert.CAFileName == "" { t.Fatalf("expected a valid CA file name") } } + +func newFS(t *testing.T) file.Filesystem { + fs, err := file.NewFakeFS() + if err != nil { + t.Fatalf("unexpected error creating filesystem: %v", err) + } + return fs +} + +func TestCreateSSLCert(t *testing.T) { + cert, _, err := generateRSACerts("echoheaders") + if err != nil { + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + + c := encodeCertPEM(cert.Cert) + k := encodePrivateKeyPEM(cert.Key) + + sslCert, err := CreateSSLCert(c, k) + if err != nil { + t.Fatalf("unexpected error checking SSL certificate: %v", err) + } + + var certKeyBuf bytes.Buffer + certKeyBuf.Write(c) + certKeyBuf.Write([]byte("\n")) + certKeyBuf.Write(k) + + if sslCert.PemCertKey != certKeyBuf.String() { + t.Fatalf("expected concatenated PEM cert and key but returned %v", sslCert.PemCertKey) + } + + if len(sslCert.CN) == 0 { + t.Fatalf("expected at least one CN but none returned") + } + + if sslCert.CN[0] != "echoheaders" { + t.Fatalf("expected cname echoheaders but 
%v returned", sslCert.CN[0]) + } +} + +type keyPair struct { + Key *rsa.PrivateKey + Cert *x509.Certificate +} + +func newCA(name string) (*keyPair, error) { + key, err := newPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err) + } + config := certutil.Config{ + CommonName: name, + } + cert, err := certutil.NewSelfSignedCACert(config, key) + if err != nil { + return nil, fmt.Errorf("unable to create a self-signed certificate for a new CA: %v", err) + } + return &keyPair{ + Key: key, + Cert: cert, + }, nil +} + +func TestIsValidHostname(t *testing.T) { + cases := map[string]struct { + Hostname string + CN []string + Valid bool + }{ + "when there are no common names": { + "foo.bar", + []string{}, + false, + }, + "when there is a match for foo.bar": { + "foo.bar", + []string{"foo.bar"}, + true, + }, + "when there is a wildcard match for foo.bar": { + "foo.bar", + []string{"*.bar"}, + true, + }, + "when there is a wrong wildcard for *.bar": { + "invalid.foo.bar", + []string{"*.bar"}, + false, + }, + } + + for k, tc := range cases { + valid := IsValidHostname(tc.Hostname, tc.CN) + if valid != tc.Valid { + t.Errorf("%s: expected '%v' but returned %v", k, tc.Valid, valid) + } + } +} + +const ( + duration365d = time.Hour * 24 * 365 + rsaKeySize = 2048 +) + +// newPrivateKey creates an RSA private key +func newPrivateKey() (*rsa.PrivateKey, error) { + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) +} + +// newSignedCert creates a signed certificate using the given CA certificate and key +func newSignedCert(cfg certutil.Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) { + serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + if err != nil { + return nil, err + } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(duration365d).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// encodePublicKeyPEM returns PEM-encoded public data +func encodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { + der, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return []byte{}, err + } + block := pem.Block{ + Type: "PUBLIC KEY", + Bytes: der, + } + return pem.EncodeToMemory(&block), nil +} + +// encodePrivateKeyPEM returns PEM-encoded private key data +func encodePrivateKeyPEM(key *rsa.PrivateKey) []byte { + block := pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + return pem.EncodeToMemory(&block) +} + +// encodeCertPEM returns PEM-encoded certificate data +func encodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: certutil.CertificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +func fakeCertificate(t *testing.T, fs filesystem.Filesystem) []byte { + cert, key := getFakeHostSSLCert("localhost") + fd, err := 
fs.Create("/key.crt") + if err != nil { + t.Errorf("failed to write test key: %v", err) + } + fd.Write(cert) + fd, err = fs.Create("/key.key") + if err != nil { + t.Errorf("failed to write test key: %v", err) + } + fd.Write(key) + return cert +} + +func dialTestServer(port string, rootCertificates ...[]byte) error { + roots := x509.NewCertPool() + for _, cert := range rootCertificates { + ok := roots.AppendCertsFromPEM(cert) + if !ok { + return fmt.Errorf("failed to add root certificate") + } + } + resp, err := tls.Dial("tcp", "localhost:"+port, &tls.Config{ + RootCAs: roots, + }) + if err != nil { + return err + } + if resp.Handshake() != nil { + return fmt.Errorf("TLS handshake should succeed: %v", err) + } + return nil +} + +func TestTLSKeyReloader(t *testing.T) { + fs := filesystem.NewFakeFs() + cert := fakeCertificate(t, fs) + + watcher := TLSListener{ + certificatePath: "/key.crt", + keyPath: "/key.key", + fs: fs, + lock: sync.Mutex{}, + } + watcher.load() + + s := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + s.Config.TLSConfig = watcher.TLSConfig() + s.Listener = tls.NewListener(s.Listener, s.Config.TLSConfig) + go s.Start() + defer s.Close() + port := strings.Split(s.Listener.Addr().String(), ":")[1] + + t.Run("without the trusted certificate", func(t *testing.T) { + if dialTestServer(port) == nil { + t.Errorf("TLS dial should fail") + } + }) + + t.Run("with the certificate trustes as root certificate", func(t *testing.T) { + if err := dialTestServer(port, cert); err != nil { + t.Errorf("TLS dial should succeed, got error: %v", err) + } + }) + + t.Run("with a new certificate", func(t *testing.T) { + newCert := fakeCertificate(t, fs) + t.Run("when the certificate is not reloaded", func(t *testing.T) { + if dialTestServer(port, newCert) == nil { + t.Errorf("TLS dial should fail") + } + }) + // simulate watch.NewFileWatcher to call the load function + watcher.load() + t.Run("when the certificate is reloaded", func(t *testing.T) { + if err := dialTestServer(port, newCert); err != nil { + t.Errorf("TLS dial should succeed, got error: %v", err) + } + }) + }) + +} diff --git a/internal/nginx/main.go b/internal/nginx/main.go new file mode 100644 index 0000000000..9043670341 --- /dev/null +++ b/internal/nginx/main.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nginx + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/tv42/httpunix" +) + +// PID defines the location of the pid file used by NGINX +var PID = "/tmp/nginx.pid" + +// StatusSocket defines the location of the unix socket used by NGINX for the status server +var StatusSocket = "/tmp/nginx-status-server.sock" + +// HealthPath defines the path used to define the health check location in NGINX +var HealthPath = "/healthz" + +// HealthCheckTimeout defines the time limit for a probe to the health check path to succeed +var HealthCheckTimeout = 10 * time.Second + +// StatusPath defines the path used to expose the NGINX status page +// http://nginx.org/en/docs/http/ngx_http_stub_status_module.html +var StatusPath = "/nginx_status" + +// StreamSocket defines the location of the unix socket used by NGINX for the stream configuration +var StreamSocket = "/tmp/ingress-stream.sock" + +var statusLocation = "nginx-status" + +// NewGetStatusRequest creates a new GET request to the internal NGINX status server +func NewGetStatusRequest(path string) (int, []byte, error) { + url := fmt.Sprintf("http+unix://%v%v", statusLocation, path) + + res, err := buildUnixSocketClient(HealthCheckTimeout).Get(url) + if err != nil { + return 0, nil, err + } + defer res.Body.Close() + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return 0, nil, err + } + + return res.StatusCode, data, nil +} + +// NewPostStatusRequest creates a new POST request to the internal NGINX status server +func NewPostStatusRequest(path, contentType string, data interface{}) (int, []byte, error) { + url := fmt.Sprintf("http+unix://%v%v", statusLocation, path) + + buf, err := json.Marshal(data) + if err != nil { + return 0, nil, err + } + + res, err := buildUnixSocketClient(HealthCheckTimeout).Post(url, contentType, bytes.NewReader(buf)) + if err != nil { + return 0, nil, err + } + defer res.Body.Close() + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return 0, nil, err + } + + return res.StatusCode, body, nil +} + +// GetServerBlock takes an nginx.conf file and a host and tries to find the server block for that host +func GetServerBlock(conf string, host string) (string, error) { + startMsg := fmt.Sprintf("## start server %v\n", host) + endMsg := fmt.Sprintf("## end server %v", host) + + blockStart := strings.Index(conf, startMsg) + if blockStart < 0 { + return "", fmt.Errorf("Host %v was not found in the controller's nginx.conf", host) + } + blockStart = blockStart + len(startMsg) + + blockEnd := strings.Index(conf, endMsg) + if blockEnd < 0 { + return "", fmt.Errorf("The end of the host server block could not be found, but the beginning was") + } + + return conf[blockStart:blockEnd], nil +} + +// ReadNginxConf reads the nginx configuration file into a string +func ReadNginxConf() (string, error) { + return ReadFileToString("/etc/nginx/nginx.conf") +} + +// ReadFileToString reads any file into a string +func ReadFileToString(path string) (string, error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + contents, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + return string(contents), nil +} + +func buildUnixSocketClient(timeout time.Duration) *http.Client { + u := &httpunix.Transport{ + DialTimeout: 1 * time.Second, + RequestTimeout: timeout, + ResponseHeaderTimeout: timeout, + } + u.RegisterLocation(statusLocation, StatusSocket) + + return &http.Client{ 
+ Transport: u, + } +} diff --git a/internal/runtime/cpu.go b/internal/runtime/cpu.go new file mode 100644 index 0000000000..bd1865e745 --- /dev/null +++ b/internal/runtime/cpu.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io/ioutil" + "math" + "path/filepath" + "runtime" + "strconv" + "strings" + + libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" +) + +// NumCPU returns the number of logical CPUs usable by the current process. +// If CPU cgroups limits are configured, use cfs_quota_us / cfs_period_us +// as formula +// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt +func NumCPU() int { + cpus := runtime.NumCPU() + + cgroupPath, err := libcontainercgroups.FindCgroupMountpoint("cpu") + if err != nil { + return cpus + } + + cpuQuota := readCgroupFileToInt64(cgroupPath, "cpu.cfs_quota_us") + cpuPeriod := readCgroupFileToInt64(cgroupPath, "cpu.cfs_period_us") + + if cpuQuota == -1 || cpuPeriod == -1 { + return cpus + } + + return int(math.Ceil(float64(cpuQuota) / float64(cpuPeriod))) +} + +func readCgroupFileToInt64(cgroupPath, cgroupFile string) int64 { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return -1 + } + + strValue := strings.TrimSpace(string(contents)) + if value, err := strconv.ParseInt(strValue, 10, 64); err == nil { + return value + } + + return -1 +} diff --git a/internal/sets/match.go b/internal/sets/match.go new file mode 100644 index 0000000000..a8bae7c671 --- /dev/null +++ b/internal/sets/match.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sets + +import ( + "reflect" +) + +type equalFunction func(e1, e2 interface{}) bool + +// Compare checks if the parameters are iterable and contain the same elements +func Compare(listA, listB interface{}, eq equalFunction) bool { + ok := isIterable(listA) + if !ok { + return false + } + + ok = isIterable(listB) + if !ok { + return false + } + + a := reflect.ValueOf(listA) + b := reflect.ValueOf(listB) + + if a.IsNil() && b.IsNil() { + return true + } + + if a.IsNil() != b.IsNil() { + return false + } + + if a.Len() != b.Len() { + return false + } + + visited := make([]bool, b.Len()) + + for i := 0; i < a.Len(); i++ { + found := false + for j := 0; j < b.Len(); j++ { + if visited[j] { + continue + } + + if eq(a.Index(i).Interface(), b.Index(j).Interface()) { + visited[j] = true + found = true + break + } + } + + if !found { + return false + } + } + + return true +} + +var compareStrings = func(e1, e2 interface{}) bool { + s1, ok := e1.(string) + if !ok { + return false + } + + s2, ok := e2.(string) + if !ok { + return false + } + + return s1 == s2 +} + +// StringElementsMatch compares two string slices and returns whether they contain the same elements +func StringElementsMatch(a, b []string) bool { + return Compare(a, b, compareStrings) +} + +func isIterable(obj interface{}) bool { + switch reflect.TypeOf(obj).Kind() { + case reflect.Slice, reflect.Array: + return true + default: + return false + } +} diff --git a/internal/sets/match_test.go b/internal/sets/match_test.go new file mode 100644 index 0000000000..e2366d2c7d --- /dev/null +++ b/internal/sets/match_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package sets + +import ( + "testing" +) + +var ( + testCasesElementMatch = []struct { + listA []string + listB []string + expected bool + }{ + {nil, nil, true}, + {[]string{"1"}, nil, false}, + {[]string{"1"}, []string{"1"}, true}, + {[]string{"1", "2", "1"}, []string{"1", "1", "2"}, true}, + {[]string{"1", "3", "1"}, []string{"1", "1", "2"}, false}, + {[]string{"1", "1"}, []string{"1", "2"}, false}, + } +) + +func TestElementsMatch(t *testing.T) { + + for _, testCase := range testCasesElementMatch { + result := StringElementsMatch(testCase.listA, testCase.listB) + if result != testCase.expected { + t.Errorf("expected %v but returned %v (%v - %v)", testCase.expected, result, testCase.listA, testCase.listB) + } + + sameResult := StringElementsMatch(testCase.listB, testCase.listA) + if sameResult != testCase.expected { + t.Errorf("expected %v but returned %v (%v - %v)", testCase.expected, sameResult, testCase.listA, testCase.listB) + } + } +} + +func BenchmarkStringElementsMatchReflection(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, test := range testCasesElementMatch { + StringElementsMatch(test.listA, test.listB) + } + } +} diff --git a/internal/task/queue.go b/internal/task/queue.go index 9b05f1653a..872b37cdbc 100644 --- a/internal/task/queue.go +++ b/internal/task/queue.go @@ -20,8 +20,9 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -42,35 +43,51 @@ type Queue struct { sync func(interface{}) error // workerDone is closed when the worker exits workerDone chan bool - + // fn makes a key for an API object fn func(obj interface{}) (interface{}, error) - + // lastSync is the Unix epoch time of the last execution of 'sync' lastSync int64 } // Element represents one item of the queue type Element struct { - Key interface{} - Timestamp int64 + Key interface{} + Timestamp int64 + IsSkippable bool } -// Run ... +// Run starts processing elements in the queue func (t *Queue) Run(period time.Duration, stopCh <-chan struct{}) { wait.Until(t.worker, period, stopCh) } -// Enqueue enqueues ns/name of the given api object in the task queue. -func (t *Queue) Enqueue(obj interface{}) { + +// EnqueueTask enqueues ns/name of the given API object in the task queue. +func (t *Queue) EnqueueTask(obj interface{}) { + t.enqueue(obj, false) +} + +// EnqueueSkippableTask enqueues ns/name of the given API object in +// the task queue as a task that can be skipped +func (t *Queue) EnqueueSkippableTask(obj interface{}) { + t.enqueue(obj, true) +} + +// enqueue enqueues ns/name of the given API object in the task queue.
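+// Skippable tasks keep their real enqueue timestamp, so the worker drops them +// when a sync already ran after they were queued (lastSync > Timestamp); +// non-skippable tasks are stamped 24 hours in the future and therefore always run. +// For illustration only (hypothetical caller, not part of this patch): +// +// queue.EnqueueTask(task.GetDummyObject("initial-sync")) // never skipped +// queue.EnqueueSkippableTask(ingObj) // may be coalesced into one sync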
+func (t *Queue) enqueue(obj interface{}, skippable bool) { if t.IsShuttingDown() { - glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) + klog.Errorf("queue has been shut down, failed to enqueue: %v", obj) return } ts := time.Now().UnixNano() - glog.V(3).Infof("queuing item %v", obj) + if !skippable { + // make sure the timestamp is bigger than lastSync + ts = time.Now().Add(24 * time.Hour).UnixNano() + } + klog.V(3).Infof("queuing item %v", obj) key, err := t.fn(obj) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } t.queue.Add(Element{ @@ -102,15 +119,15 @@ func (t *Queue) worker() { item := key.(Element) if t.lastSync > item.Timestamp { - glog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp) + klog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp) t.queue.Forget(key) t.queue.Done(key) continue } - glog.V(3).Infof("syncing %v", item.Key) + klog.V(3).Infof("syncing %v", item.Key) if err := t.sync(key); err != nil { - glog.Warningf("requeuing %v, err %v", item.Key, err) + klog.Warningf("requeuing %v, err %v", item.Key, err) t.queue.AddRateLimited(Element{ Key: item.Key, Timestamp: time.Now().UnixNano(), @@ -166,3 +183,10 @@ func NewCustomTaskQueue(syncFn func(interface{}) error, fn func(interface{}) (in return q } + +// GetDummyObject returns a valid object that can be used in the Queue +func GetDummyObject(name string) *metav1.ObjectMeta { + return &metav1.ObjectMeta{ + Name: name, + } +} diff --git a/internal/task/queue_test.go b/internal/task/queue_test.go index cbfd9a49b8..52dab7c207 100644 --- a/internal/task/queue_test.go +++ b/internal/task/queue_test.go @@ -71,7 +71,7 @@ func TestEnqueueSuccess(t *testing.T) { k: "testKey", v: "testValue", } - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) if atomic.LoadUint32(&sr) != 1 { @@ -99,7 +99,7 @@ func TestEnqueueFailed(t *testing.T) { q.Shutdown() // wait for shutdown time.Sleep(time.Millisecond * 10) - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) // queue is shutdown, so mockSynFn should not be executed, so the result should be 0 @@ -121,7 +121,7 @@ func TestEnqueueKeyError(t *testing.T) { v: "testValue", } - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) // key error, so the result should be 0 @@ -137,21 +137,21 @@ func TestSkipEnqueue(t *testing.T) { atomic.StoreUint32(&sr, 0) q := NewCustomTaskQueue(mockSynFn, mockKeyFn) stopCh := make(chan struct{}) - // run queue - go q.Run(time.Second, stopCh) // mock object which will be enqueued mo := mockEnqueueObj{ k: "testKey", v: "testValue", } - q.Enqueue(mo) - q.Enqueue(mo) - q.Enqueue(mo) - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) + q.EnqueueSkippableTask(mo) + q.EnqueueTask(mo) + q.EnqueueSkippableTask(mo) + // run queue + go q.Run(time.Second, stopCh) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) - if atomic.LoadUint32(&sr) != 1 { - t.Errorf("sr should be 1, but is %d", sr) + if atomic.LoadUint32(&sr) != 2 { + t.Errorf("sr should be 2, but is %d", sr) } // shutdown queue before exit diff --git a/internal/watch/dummy.go b/internal/watch/dummy.go deleted file mode 100644 index 16a607fc2e..0000000000 --- a/internal/watch/dummy.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watch - -// DummyFileWatcher noop implementation of a file watcher -type DummyFileWatcher struct{} - -func NewDummyFileWatcher(file string, onEvent func()) FileWatcher { - return DummyFileWatcher{} -} - -// Close ends the watch -func (f DummyFileWatcher) Close() error { - return nil -} diff --git a/internal/watch/file_watcher.go b/internal/watch/file_watcher.go index 91daf06208..a9ffb6ad83 100644 --- a/internal/watch/file_watcher.go +++ b/internal/watch/file_watcher.go @@ -21,9 +21,10 @@ import ( "path" "strings" - "gopkg.in/fsnotify.v1" + "gopkg.in/fsnotify/fsnotify.v1" ) +// FileWatcher is an interface we use to watch changes in files type FileWatcher interface { Close() error } diff --git a/internal/watch/file_watcher_test.go b/internal/watch/file_watcher_test.go index 5733cd07c9..83a37ea0b9 100644 --- a/internal/watch/file_watcher_test.go +++ b/internal/watch/file_watcher_test.go @@ -21,6 +21,8 @@ import ( "os" "testing" "time" + + "k8s.io/ingress-nginx/internal/file" ) func prepareTimeout() chan bool { @@ -33,15 +35,15 @@ func prepareTimeout() chan bool { } func TestFileWatcher(t *testing.T) { - file, err := ioutil.TempFile("", "fw") + f, err := ioutil.TempFile("", "fw") if err != nil { t.Fatalf("unexpected error: %v", err) } - defer file.Close() - defer os.Remove(file.Name()) + defer f.Close() + defer os.Remove(f.Name()) count := 0 events := make(chan bool, 10) - fw, err := NewFileWatcher(file.Name(), func() { + fw, err := NewFileWatcher(f.Name(), func() { count++ if count != 1 { t.Fatalf("expected 1 but returned %v", count) @@ -58,7 +60,7 @@ func TestFileWatcher(t *testing.T) { t.Fatalf("expected no events before writing a file") case <-timeoutChan: } - ioutil.WriteFile(file.Name(), []byte{}, 0644) + ioutil.WriteFile(f.Name(), []byte{}, file.ReadWriteByUser) select { case <-events: case <-timeoutChan: diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000000..d682f33133 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,88 @@ +site_name: NGINX Ingress Controller +strict: true +repo_name: "kubernetes/ingress-nginx" +repo_url: "https://github.com/kubernetes/ingress-nginx" +markdown_extensions: + - admonition + - codehilite + - pymdownx.inlinehilite + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.superfences + - toc: + permalink: true +theme: + name: material + feature: + tabs: true + logo: + icon: "public" # globe icon + palette: + primary: "teal" + accent: "green" + include_sidebar: true + +plugins: + - search + +extra_css: [extra.css] + +google_analytics: ["UA-118407822-1", "kubernetes.github.io"] + +nav: + - Welcome: + - Welcome: "index.md" + - How it works: "how-it-works.md" + - Troubleshooting: "troubleshooting.md" + - kubectl plugin: "kubectl-plugin.md" + - Development: "development.md" + - Deployment: + - Installation Guide: "deploy/index.md" + - Bare-metal considerations: "deploy/baremetal.md" + - Role Based Access Control (RBAC): "deploy/rbac.md" + - Validating Webhook (admission controller): 
"deploy/validating-webhook.md" + - Upgrade: "deploy/upgrade.md" + - User guide: + - NGINX Configuration: + - Introduction: "user-guide/nginx-configuration/index.md" + - Basic usage: "user-guide/basic-usage.md" + - Annotations: "user-guide/nginx-configuration/annotations.md" + - ConfigMap: "user-guide/nginx-configuration/configmap.md" + - Custom NGINX template: "user-guide/nginx-configuration/custom-template.md" + - Log format: "user-guide/nginx-configuration/log-format.md" + - Command line arguments: "user-guide/cli-arguments.md" + - Custom errors: "user-guide/custom-errors.md" + - Default backend: "user-guide/default-backend.md" + - Exposing TCP and UDP services: "user-guide/exposing-tcp-udp-services.md" + - Regular expressions in paths: user-guide/ingress-path-matching.md + - External Articles: "user-guide/external-articles.md" + - Miscellaneous: "user-guide/miscellaneous.md" + - Prometheus and Grafana installation: "user-guide/monitoring.md" + - Multiple Ingress controllers: "user-guide/multiple-ingress.md" + - TLS/HTTPS: "user-guide/tls.md" + - Third party addons: + - ModSecurity Web Application Firewall: "user-guide/third-party-addons/modsecurity.md" + - OpenTracing: "user-guide/third-party-addons/opentracing.md" + - Examples: + - Introduction: "examples/index.md" + - Prerequisites: "examples/PREREQUISITES.md" + - Sticky Sessions: "examples/affinity/cookie/README.md" + - Auth: + - Basic Authentication: "examples/auth/basic/README.md" + - Client Certificate Authentication: "examples/auth/client-certs/README.md" + - External Basic Authentication: "examples/auth/external-auth/README.md" + - External OAUTH Authentication: "examples/auth/oauth-external-auth/README.md" + - Customization: + - Configuration Snippets: "examples/customization/configuration-snippets/README.md" + - Custom Configuration: "examples/customization/custom-configuration/README.md" + - Custom Errors: "examples/customization/custom-errors/README.md" + - Custom Headers: "examples/customization/custom-headers/README.md" + - External authentication: "examples/customization/external-auth-headers/README.md" + - Custom DH parameters for perfect forward secrecy: "examples/customization/ssl-dh-param/README.md" + - Sysctl tuning: "examples/customization/sysctl/README.md" + - Docker registry: "examples/docker-registry/README.md" + - gRPC: "examples/grpc/README.md" + - Multi TLS certificate termination: "examples/multi-tls/README.md" + - Rewrite: "examples/rewrite/README.md" + - Static IPs: "examples/static-ip/README.md" + - TLS termination: "examples/tls-termination/README.md" diff --git a/requirements-docs.txt b/requirements-docs.txt new file mode 100644 index 0000000000..e9d0be8439 --- /dev/null +++ b/requirements-docs.txt @@ -0,0 +1,4 @@ +mkdocs-material~=4.0.2 +mkdocs~=1.0.4 +pymdown-extensions~=6.0 +pygments~=2.3.1 diff --git a/rootfs/Dockerfile b/rootfs/Dockerfile index 013d32de0a..d6d522fb24 100644 --- a/rootfs/Dockerfile +++ b/rootfs/Dockerfile @@ -16,18 +16,42 @@ FROM BASEIMAGE CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/ +WORKDIR /etc/nginx + RUN clean-install \ diffutils \ - dumb-init + libcap2-bin + +COPY --chown=www-data:www-data . 
/ + +# Fix permissions during the build to avoid issues at runtime +# with volumes (custom templates) +RUN bash -eu -c ' \ + writeDirs=( \ + /etc/ingress-controller/ssl \ + /etc/ingress-controller/auth \ + /var/log \ + /var/log/nginx \ + /tmp \ + ); \ + for dir in "${writeDirs[@]}"; do \ + mkdir -p ${dir}; \ + chown -R www-data.www-data ${dir}; \ + done' + +RUN setcap cap_net_bind_service=+ep /nginx-ingress-controller \ + && setcap -v cap_net_bind_service=+ep /nginx-ingress-controller + +RUN setcap cap_net_bind_service=+ep /usr/sbin/nginx \ + && setcap -v cap_net_bind_service=+ep /usr/sbin/nginx # Create symlinks to redirect nginx logs to stdout and stderr (docker log collector) # This only works if nginx is started with CMD or ENTRYPOINT -RUN mkdir -p /var/log/nginx \ - && ln -sf /dev/stdout /var/log/nginx/access.log \ - && ln -sf /dev/stderr /var/log/nginx/error.log +RUN ln -sf /dev/stdout /var/log/nginx/access.log +RUN ln -sf /dev/stderr /var/log/nginx/error.log -COPY . / +USER www-data -ENTRYPOINT ["/usr/bin/dumb-init"] +ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/nginx-ingress-controller"] diff --git a/rootfs/etc/nginx/lua/balancer.lua b/rootfs/etc/nginx/lua/balancer.lua new file mode 100644 index 0000000000..dbdeea6dc4 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer.lua @@ -0,0 +1,263 @@ +local ngx_balancer = require("ngx.balancer") +local cjson = require("cjson.safe") +local util = require("util") +local dns_util = require("util.dns") +local configuration = require("configuration") +local round_robin = require("balancer.round_robin") +local chash = require("balancer.chash") +local chashsubset = require("balancer.chashsubset") +local sticky = require("balancer.sticky") +local ewma = require("balancer.ewma") + +-- measured in seconds +-- for an Nginx worker to pick up the new list of upstream peers +-- it will take <the delay until controller POSTed the backend object to the Nginx endpoint> + BACKENDS_SYNC_INTERVAL +local BACKENDS_SYNC_INTERVAL = 1 + +local DEFAULT_LB_ALG = "round_robin" +local IMPLEMENTATIONS = { + round_robin = round_robin, + chash = chash, + chashsubset = chashsubset, + sticky = sticky, + ewma = ewma, +} + +local _M = {} +local balancers = {} + +local function get_implementation(backend) + local name = backend["load-balance"] or DEFAULT_LB_ALG + + if backend["sessionAffinityConfig"] and backend["sessionAffinityConfig"]["name"] == "cookie" then + name = "sticky" + elseif backend["upstreamHashByConfig"] and backend["upstreamHashByConfig"]["upstream-hash-by"] then + if backend["upstreamHashByConfig"]["upstream-hash-by-subset"] then + name = "chashsubset" + else + name = "chash" + end + end + + local implementation = IMPLEMENTATIONS[name] + if not implementation then + ngx.log(ngx.WARN, string.format("%s is not supported, falling back to %s", backend["load-balance"], DEFAULT_LB_ALG)) + implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG] + end + + return implementation +end + +local function resolve_external_names(original_backend) + local backend = util.deepcopy(original_backend) + local endpoints = {} + for _, endpoint in ipairs(backend.endpoints) do + local ips = dns_util.resolve(endpoint.address) + for _, ip in ipairs(ips) do + table.insert(endpoints, { address = ip, port = endpoint.port }) + end + end + backend.endpoints = endpoints + return backend +end + +local function format_ipv6_endpoints(endpoints) + local formatted_endpoints = {} + for _, endpoint in ipairs(endpoints) do + local formatted_endpoint = endpoint + if not endpoint.address:match("^%d+%.%d+%.%d+%.%d+$") then + formatted_endpoint.address = string.format("[%s]", endpoint.address)
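+ -- e.g. "::1" becomes "[::1]", so the later 'address .. ":" .. port' + -- concatenation done by the balancers stays unambiguous for IPv6 endpoints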
+ end + table.insert(formatted_endpoints, formatted_endpoint) + end + return formatted_endpoints +end + +local function sync_backend(backend) + if not backend.endpoints or #backend.endpoints == 0 then + ngx.log(ngx.INFO, string.format("there is no endpoint for backend %s. Removing...", backend.name)) + balancers[backend.name] = nil + return + end + + local implementation = get_implementation(backend) + local balancer = balancers[backend.name] + + if not balancer then + balancers[backend.name] = implementation:new(backend) + return + end + + -- every implementation is the metatable of its instances (see .new(...) functions) + -- here we check if `balancer` is the instance of `implementation` + -- if it is not then we deduce the LB algorithm has changed for the backend + if getmetatable(balancer) ~= implementation then + ngx.log(ngx.INFO, + string.format("LB algorithm changed from %s to %s, resetting the instance", balancer.name, implementation.name)) + balancers[backend.name] = implementation:new(backend) + return + end + + local service_type = backend.service and backend.service.spec and backend.service.spec["type"] + if service_type == "ExternalName" then + backend = resolve_external_names(backend) + end + + backend.endpoints = format_ipv6_endpoints(backend.endpoints) + + balancer:sync(backend) +end + +local function sync_backends() + local backends_data = configuration.get_backends_data() + if not backends_data then + balancers = {} + return + end + + local new_backends, err = cjson.decode(backends_data) + if not new_backends then + ngx.log(ngx.ERR, "could not parse backends data: ", err) + return + end + + local balancers_to_keep = {} + for _, new_backend in ipairs(new_backends) do + sync_backend(new_backend) + balancers_to_keep[new_backend.name] = balancers[new_backend.name] + end + + for backend_name, _ in pairs(balancers) do + if not balancers_to_keep[backend_name] then + balancers[backend_name] = nil + end + end +end + +local function route_to_alternative_balancer(balancer) + if not balancer.alternative_backends then + return false + end + + -- TODO: support traffic shaping for n > 1 alternative backends + local backend_name = balancer.alternative_backends[1] + if not backend_name then + ngx.log(ngx.ERR, "empty alternative backend") + return false + end + + local alternative_balancer = balancers[backend_name] + if not alternative_balancer then + ngx.log(ngx.ERR, "no alternative balancer for backend: " .. tostring(backend_name)) + return false + end + + local traffic_shaping_policy = alternative_balancer.traffic_shaping_policy + if not traffic_shaping_policy then + ngx.log(ngx.ERR, "traffic shaping policy is not set for balancer of backend: " .. tostring(backend_name)) + return false + end + + local target_header = util.replace_special_char(traffic_shaping_policy.header, "-", "_") + local header = ngx.var["http_" .. target_header] + if header then + if traffic_shaping_policy.headerValue and #traffic_shaping_policy.headerValue > 0 then + if traffic_shaping_policy.headerValue == header then + return true + end + elseif header == "always" then + return true + elseif header == "never" then + return false + end + end + + local target_cookie = traffic_shaping_policy.cookie + local cookie = ngx.var["cookie_" .. 
target_cookie] + if cookie then + if cookie == "always" then + return true + elseif cookie == "never" then + return false + end + end + + if math.random(100) <= traffic_shaping_policy.weight then + return true + end + + return false +end + +local function get_balancer() + local backend_name = ngx.var.proxy_upstream_name + + local balancer = balancers[backend_name] + if not balancer then + return + end + + if route_to_alternative_balancer(balancer) then + local alternative_balancer = balancers[balancer.alternative_backends[1]] + return alternative_balancer + end + + return balancer +end + +function _M.init_worker() + sync_backends() -- when worker starts, sync backends without delay + local _, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends) + if err then + ngx.log(ngx.ERR, string.format("error when setting up timer.every for sync_backends: %s", tostring(err))) + end +end + +function _M.rewrite() + local balancer = get_balancer() + if not balancer then + ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE + return ngx.exit(ngx.status) + end +end + +function _M.balance() + local balancer = get_balancer() + if not balancer then + return + end + + local peer = balancer:balance() + if not peer then + ngx.log(ngx.WARN, "no peer was returned, balancer: " .. balancer.name) + return + end + + ngx_balancer.set_more_tries(1) + + local ok, err = ngx_balancer.set_current_peer(peer) + if not ok then + ngx.log(ngx.ERR, string.format("error while setting current upstream peer %s: %s", peer, err)) + end +end + +function _M.log() + local balancer = get_balancer() + if not balancer then + return + end + + if not balancer.after_balance then + return + end + + balancer:after_balance() +end + +if _TEST then + _M.get_implementation = get_implementation + _M.sync_backend = sync_backend + _M.route_to_alternative_balancer = route_to_alternative_balancer +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/chash.lua b/rootfs/etc/nginx/lua/balancer/chash.lua new file mode 100644 index 0000000000..3eddab9e58 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/chash.lua @@ -0,0 +1,25 @@ +local balancer_resty = require("balancer.resty") +local resty_chash = require("resty.chash") +local util = require("util") + +local _M = balancer_resty:new({ factory = resty_chash, name = "chash" }) + +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + local o = { + instance = self.factory:new(nodes), + hash_by = backend["upstreamHashByConfig"]["upstream-hash-by"], + traffic_shaping_policy = backend.trafficShapingPolicy, + alternative_backends = backend.alternativeBackends, + } + setmetatable(o, self) + self.__index = self + return o +end + +function _M.balance(self) + local key = util.lua_ngx_var(self.hash_by) + return self.instance:find(key) +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/chashsubset.lua b/rootfs/etc/nginx/lua/balancer/chashsubset.lua new file mode 100644 index 0000000000..9599378d62 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/chashsubset.lua @@ -0,0 +1,84 @@ +-- Consistent hashing to a subset of nodes. Instead of returning the same node +-- always, we return the same subset always. 
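+-- +-- Worked example (illustration only): with 5 endpoints and +-- upstream-hash-by-subset-size=3, build_subset_map below creates +-- math.ceil(5/3) = 2 subsets of 3 nodes each; the missing 6th slot is filled +-- by reusing the first endpoint, giving set1 = {e1, e2, e3} and set2 = {e4, e5, e1}.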
+ +local resty_chash = require("resty.chash") +local util = require("util") + +local _M = { name = "chashsubset" } + +local function build_subset_map(backend) + local endpoints = {} + local subset_map = {} + local subsets = {} + local subset_size = backend["upstreamHashByConfig"]["upstream-hash-by-subset-size"] + + for _, endpoint in pairs(backend.endpoints) do + table.insert(endpoints, endpoint) + end + + local set_count = math.ceil(#endpoints/subset_size) + local node_count = set_count * subset_size + + -- if we don't have enough endpoints, we reuse endpoints in the last set to + -- keep the same number on all of them. + local j = 1 + for _ = #endpoints+1, node_count do + table.insert(endpoints, endpoints[j]) + j = j+1 + end + + local k = 1 + for i = 1, set_count do + local subset = {} + local subset_id = "set" .. tostring(i) + for _ = 1, subset_size do + table.insert(subset, endpoints[k]) + k = k+1 + end + subsets[subset_id] = subset + subset_map[subset_id] = 1 + end + + return subset_map, subsets +end + +function _M.new(self, backend) + local subset_map, subsets = build_subset_map(backend) + + local o = { + instance = resty_chash:new(subset_map), + hash_by = backend["upstreamHashByConfig"]["upstream-hash-by"], + subsets = subsets, + current_endpoints = backend.endpoints + } + setmetatable(o, self) + self.__index = self + return o +end + +function _M.balance(self) + local key = util.lua_ngx_var(self.hash_by) + local subset_id = self.instance:find(key) + local endpoints = self.subsets[subset_id] + local endpoint = endpoints[math.random(#endpoints)] + return endpoint.address .. ":" .. endpoint.port +end + +function _M.sync(self, backend) + local subset_map + + local changed = not util.deep_compare(self.current_endpoints, backend.endpoints) + if not changed then + return + end + + self.current_endpoints = backend.endpoints + + subset_map, self.subsets = build_subset_map(backend) + + self.instance:reinit(subset_map) + + return +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/ewma.lua b/rootfs/etc/nginx/lua/balancer/ewma.lua new file mode 100644 index 0000000000..afea019145 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/ewma.lua @@ -0,0 +1,134 @@ +-- Original Authors: Shiv Nagarajan & Scott Francis +-- Accessed: March 12, 2018 +-- Inspiration drawn from: +-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421 +-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala + + +local util = require("util") +local split = require("util.split") + +local string_format = string.format +local ngx_log = ngx.log +local INFO = ngx.INFO + +local DECAY_TIME = 10 -- this value is in seconds +local PICK_SET_SIZE = 2 + +local _M = { name = "ewma" } + +local function decay_ewma(ewma, last_touched_at, rtt, now) + local td = now - last_touched_at + td = (td > 0) and td or 0 + local weight = math.exp(-td/DECAY_TIME) + + ewma = ewma * weight + rtt * (1.0 - weight) + return ewma +end + +local function get_or_update_ewma(self, upstream, rtt, update) + local ewma = self.ewma[upstream] or 0 + + local now = ngx.now() + local last_touched_at = self.ewma_last_touched_at[upstream] or 0 + ewma = decay_ewma(ewma, last_touched_at, rtt, now) + + if not update then + return ewma, nil + end + + self.ewma[upstream] = ewma + self.ewma_last_touched_at[upstream] = now + return ewma, nil +end + + +local function score(self, upstream) + -- Original implementation used names + -- Endpoints don't have names, so passing in IP:Port as key instead + local 
upstream_name = upstream.address .. ":" .. upstream.port + return get_or_update_ewma(self, upstream_name, 0, false) +end + +-- implementation similar to https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle +-- or https://en.wikipedia.org/wiki/Random_permutation +-- loop from 1 .. k +-- pick a random value r from the remaining set of unpicked values (i .. n) +-- swap the value at position i with the value at position r +local function shuffle_peers(peers, k) + for i=1, k do + local rand_index = math.random(i,#peers) + peers[i], peers[rand_index] = peers[rand_index], peers[i] + end + -- peers[1 .. k] will now contain a randomly selected k from #peers +end + +local function pick_and_score(self, peers, k) + shuffle_peers(peers, k) + local lowest_score_index = 1 + local lowest_score = score(self, peers[lowest_score_index]) + for i = 2, k do + local new_score = score(self, peers[i]) + if new_score < lowest_score then + lowest_score_index, lowest_score = i, new_score + end + end + return peers[lowest_score_index] +end + +function _M.balance(self) + local peers = self.peers + local endpoint = peers[1] + + if #peers > 1 then + local k = (#peers < PICK_SET_SIZE) and #peers or PICK_SET_SIZE + local peer_copy = util.deepcopy(peers) + endpoint = pick_and_score(self, peer_copy, k) + end + + -- TODO(elvinefendi) move this processing to _M.sync + return endpoint.address .. ":" .. endpoint.port +end + +function _M.after_balance(self) + local response_time = tonumber(split.get_first_value(ngx.var.upstream_response_time)) or 0 + local connect_time = tonumber(split.get_first_value(ngx.var.upstream_connect_time)) or 0 + local rtt = connect_time + response_time + local upstream = split.get_first_value(ngx.var.upstream_addr) + + if util.is_blank(upstream) then + return + end + get_or_update_ewma(self, upstream, rtt, true) +end + +function _M.sync(self, backend) + self.traffic_shaping_policy = backend.trafficShapingPolicy + self.alternative_backends = backend.alternativeBackends + + local changed = not util.deep_compare(self.peers, backend.endpoints) + if not changed then + return + end + + ngx_log(INFO, string_format("[%s] peers have changed for backend %s", self.name, backend.name)) + + self.peers = backend.endpoints + self.ewma = {} + self.ewma_last_touched_at = {} +end + +function _M.new(self, backend) + local o = { + peers = backend.endpoints, + ewma = {}, + ewma_last_touched_at = {}, + traffic_shaping_policy = backend.trafficShapingPolicy, + alternative_backends = backend.alternativeBackends, + } + setmetatable(o, self) + self.__index = self + return o +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/resty.lua b/rootfs/etc/nginx/lua/balancer/resty.lua new file mode 100644 index 0000000000..a4090c4c08 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/resty.lua @@ -0,0 +1,31 @@ +local util = require("util") + +local string_format = string.format +local ngx_log = ngx.log +local INFO = ngx.INFO + +local _M = {} + +function _M.new(self, o) + o = o or {} + setmetatable(o, self) + self.__index = self + return o +end + +function _M.sync(self, backend) + self.traffic_shaping_policy = backend.trafficShapingPolicy + self.alternative_backends = backend.alternativeBackends + + local nodes = util.get_nodes(backend.endpoints) + local changed = not util.deep_compare(self.instance.nodes, nodes) + if not changed then + return + end + + ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name)) + + self.instance:reinit(nodes) +end + +return _M diff --git 
a/rootfs/etc/nginx/lua/balancer/round_robin.lua b/rootfs/etc/nginx/lua/balancer/round_robin.lua new file mode 100644 index 0000000000..46641363ee --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/round_robin.lua @@ -0,0 +1,23 @@ +local balancer_resty = require("balancer.resty") +local resty_roundrobin = require("resty.roundrobin") +local util = require("util") + +local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" }) + +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + local o = { + instance = self.factory:new(nodes), + traffic_shaping_policy = backend.trafficShapingPolicy, + alternative_backends = backend.alternativeBackends, + } + setmetatable(o, self) + self.__index = self + return o +end + +function _M.balance(self) + return self.instance:find() +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua new file mode 100644 index 0000000000..4860c75a38 --- /dev/null +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -0,0 +1,168 @@ +local balancer_resty = require("balancer.resty") +local resty_chash = require("resty.chash") +local util = require("util") +local ck = require("resty.cookie") +local math = require("math") +local ngx_balancer = require("ngx.balancer") +local split = require("util.split") + +local string_format = string.format +local ngx_log = ngx.log +local INFO = ngx.INFO + +local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) +local DEFAULT_COOKIE_NAME = "route" + +-- Consider the situation of N upstreams, one of which is failing. +-- Then the probability of obtaining a failing upstream after M iterations would be close to (1/N)**M. +-- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6) +-- which is much better than ~10**(-3) for 10 iterations.
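+-- (0.5**20 is about 9.5*10**(-7) and 0.5**10 is about 9.8*10**(-4), which round +-- to the orders of magnitude quoted above.)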
+local MAX_UPSTREAM_CHECKS_COUNT = 20 + +function _M.cookie_name(self) + return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME +end + +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + + local o = { + instance = self.factory:new(nodes), + traffic_shaping_policy = backend.trafficShapingPolicy, + alternative_backends = backend.alternativeBackends, + cookie_session_affinity = backend["sessionAffinityConfig"]["cookieSessionAffinity"] + } + setmetatable(o, self) + self.__index = self + return o +end + +local function set_cookie(self, value) + local cookie, err = ck:new() + if not cookie then + ngx.log(ngx.ERR, err) + end + + local cookie_path = self.cookie_session_affinity.path + if not cookie_path then + cookie_path = ngx.var.location_path + end + + local cookie_data = { + key = self:cookie_name(), + value = value, + path = cookie_path, + httponly = true, + secure = ngx.var.https == "on", + } + + if self.cookie_session_affinity.expires and self.cookie_session_affinity.expires ~= "" then + cookie_data.expires = ngx.cookie_time(ngx.time() + tonumber(self.cookie_session_affinity.expires)) + end + + if self.cookie_session_affinity.maxage and self.cookie_session_affinity.maxage ~= "" then + cookie_data.max_age = tonumber(self.cookie_session_affinity.maxage) + end + + local ok + ok, err = cookie:set(cookie_data) + if not ok then + ngx.log(ngx.ERR, err) + end +end + +function _M.get_last_failure() + return ngx_balancer.get_last_failure() +end + +local function get_failed_upstreams() + local indexed_upstream_addrs = {} + local upstream_addrs = split.split_upstream_var(ngx.var.upstream_addr) or {} + + for _, addr in ipairs(upstream_addrs) do + indexed_upstream_addrs[addr] = true + end + + return indexed_upstream_addrs +end + +local function pick_new_upstream(self) + local failed_upstreams = get_failed_upstreams() + + for i = 1, MAX_UPSTREAM_CHECKS_COUNT do + local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math.random(999999)) + + local new_upstream = self.instance:find(key) + + if not failed_upstreams[new_upstream] then + return new_upstream, key + end + end + + return nil, nil +end + +local function should_set_cookie(self) + if self.cookie_session_affinity.locations then + local locs = self.cookie_session_affinity.locations[ngx.var.host] + if locs ~= nil then + for _, path in pairs(locs) do + if ngx.var.location_path == path then + return true + end + end + end + end + + return false +end + +function _M.balance(self) + local cookie, err = ck:new() + if not cookie then + ngx.log(ngx.ERR, "error while initializing cookie: " .. tostring(err)) + return + end + + local upstream_from_cookie + + local key = cookie:get(self:cookie_name()) + if key then + upstream_from_cookie = self.instance:find(key) + end + + local last_failure = self.get_last_failure() + local should_pick_new_upstream = last_failure ~= nil and self.cookie_session_affinity.change_on_failure or upstream_from_cookie == nil + + if not should_pick_new_upstream then + return upstream_from_cookie + end + + local new_upstream, key = pick_new_upstream(self) + if not new_upstream then + ngx.log(ngx.WARN, string.format("failed to get new upstream; using upstream %s", new_upstream)) + elseif should_set_cookie(self) then + set_cookie(self, key) + end + + return new_upstream +end + +function _M.sync(self, backend) + balancer_resty.sync(self, backend) + + -- Reload the balancer if any of the annotations have changed. 
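+ -- The comparison below is a deep one, so a change to any field of the cookie + -- session affinity config (name, expires, maxage, path, locations, ...) triggers it.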
+ local changed = not util.deep_compare( + self.cookie_session_affinity, + backend.sessionAffinityConfig.cookieSessionAffinity + ) + if not changed then + return + end + + ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name)) + + self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity +end + +return _M diff --git a/rootfs/etc/nginx/lua/certificate.lua b/rootfs/etc/nginx/lua/certificate.lua new file mode 100644 index 0000000000..e07ebcb080 --- /dev/null +++ b/rootfs/etc/nginx/lua/certificate.lua @@ -0,0 +1,82 @@ +local ssl = require("ngx.ssl") +local configuration = require("configuration") +local re_sub = ngx.re.sub + +local _M = {} + +local DEFAULT_CERT_HOSTNAME = "_" + +local function set_pem_cert_key(pem_cert_key) + local der_cert, der_cert_err = ssl.cert_pem_to_der(pem_cert_key) + if not der_cert then + return "failed to convert certificate chain from PEM to DER: " .. der_cert_err + end + + local set_cert_ok, set_cert_err = ssl.set_der_cert(der_cert) + if not set_cert_ok then + return "failed to set DER cert: " .. set_cert_err + end + + local der_priv_key, der_priv_key_err = ssl.priv_key_pem_to_der(pem_cert_key) + if not der_priv_key then + return "failed to convert private key from PEM to DER: " .. der_priv_key_err + end + + local set_priv_key_ok, set_priv_key_err = ssl.set_der_priv_key(der_priv_key) + if not set_priv_key_ok then + return "failed to set DER private key: " .. set_priv_key_err + end +end + +local function get_pem_cert_key(hostname) + local pem_cert_key = configuration.get_pem_cert_key(hostname) + if pem_cert_key then + return pem_cert_key + end + + local wildcard_hostname, _, err = re_sub(hostname, "^[^\\.]+\\.", "*.", "jo") + if err then + ngx.log(ngx.ERR, "could not generate wildcard hostname: ", err) + return pem_cert_key + end + + if wildcard_hostname then + pem_cert_key = configuration.get_pem_cert_key(wildcard_hostname) + end + return pem_cert_key +end + +function _M.call() + local hostname, hostname_err = ssl.server_name() + if hostname_err then + ngx.log(ngx.ERR, "error while obtaining hostname: " .. hostname_err) + end + if not hostname then + ngx.log(ngx.INFO, + "obtained hostname is nil (the client does not support SNI?), falling back to default certificate") + hostname = DEFAULT_CERT_HOSTNAME + end + + local pem_cert_key = get_pem_cert_key(hostname) + if not pem_cert_key then + pem_cert_key = get_pem_cert_key(DEFAULT_CERT_HOSTNAME) + end + if not pem_cert_key then + ngx.log(ngx.ERR, "certificate not found, falling back to fake certificate for hostname: " .. tostring(hostname)) + return + end + + local clear_ok, clear_err = ssl.clear_certs() + if not clear_ok then + ngx.log(ngx.ERR, "failed to clear existing (fallback) certificates: " ..
clear_err) + return ngx.exit(ngx.ERROR) + end + + local set_pem_cert_key_err = set_pem_cert_key(pem_cert_key) + if set_pem_cert_key_err then + ngx.log(ngx.ERR, set_pem_cert_key_err) + return ngx.exit(ngx.ERROR) + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/configuration.lua b/rootfs/etc/nginx/lua/configuration.lua new file mode 100644 index 0000000000..10f86eddc9 --- /dev/null +++ b/rootfs/etc/nginx/lua/configuration.lua @@ -0,0 +1,194 @@ +local cjson = require("cjson.safe") + +-- this is the Lua representation of Configuration struct in internal/ingress/types.go +local configuration_data = ngx.shared.configuration_data +local certificate_data = ngx.shared.certificate_data + +local _M = { + nameservers = {} +} + +function _M.get_backends_data() + return configuration_data:get("backends") +end + +function _M.get_general_data() + return configuration_data:get("general") +end + +local function fetch_request_body() + ngx.req.read_body() + local body = ngx.req.get_body_data() + + if not body then + -- request body might've been written to tmp file if body > client_body_buffer_size + local file_name = ngx.req.get_body_file() + local file = io.open(file_name, "rb") + + if not file then + return nil + end + + body = file:read("*all") + file:close() + end + + return body +end + +function _M.get_pem_cert_key(hostname) + return certificate_data:get(hostname) +end + +local function handle_servers() + if ngx.var.request_method ~= "POST" then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Only POST requests are allowed!") + return + end + + local raw_servers = fetch_request_body() + + local servers, err = cjson.decode(raw_servers) + if not servers then + ngx.log(ngx.ERR, "could not parse servers: ", err) + ngx.status = ngx.HTTP_BAD_REQUEST + return + end + + local err_buf = {} + for _, server in ipairs(servers) do + if server.hostname and server.sslCert.pemCertKey then + local success + success, err = certificate_data:safe_set(server.hostname, server.sslCert.pemCertKey) + if not success then + if err == "no memory" then + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + ngx.log(ngx.ERR, "no memory in certificate_data dictionary") + return + end + + local err_msg = string.format("error setting certificate for %s: %s\n", server.hostname, tostring(err)) + table.insert(err_buf, err_msg) + end + else + ngx.log(ngx.WARN, "hostname or pemCertKey are not present") + end + end + + if #err_buf > 0 then + ngx.log(ngx.ERR, table.concat(err_buf)) + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + return + end + + ngx.status = ngx.HTTP_CREATED +end + +local function handle_general() + if ngx.var.request_method == "GET" then + ngx.status = ngx.HTTP_OK + ngx.print(_M.get_general_data()) + return + end + + if ngx.var.request_method ~= "POST" then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Only POST and GET requests are allowed!") + return + end + + local config = fetch_request_body() + + local success, err = configuration_data:safe_set("general", config) + if not success then + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + ngx.log(ngx.ERR, "error setting general config: " .. 
tostring(err)) + return + end + + ngx.status = ngx.HTTP_CREATED +end + +local function handle_certs() + if ngx.var.request_method ~= "GET" then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Only GET requests are allowed!") + return + end + + local query = ngx.req.get_uri_args() + if not query["hostname"] then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Hostname must be specified.") + return + end + + local key = _M.get_pem_cert_key(query["hostname"]) + if key then + ngx.status = ngx.HTTP_OK + ngx.print(key) + return + else + ngx.status = ngx.HTTP_NOT_FOUND + ngx.print("No key associated with this hostname.") + return + end +end + +function _M.call() + if ngx.var.request_method ~= "POST" and ngx.var.request_method ~= "GET" then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Only POST and GET requests are allowed!") + return + end + + if ngx.var.request_uri == "/configuration/servers" then + handle_servers() + return + end + + if ngx.var.request_uri == "/configuration/general" then + handle_general() + return + end + + if ngx.var.uri == "/configuration/certs" then + handle_certs() + return + end + + if ngx.var.request_uri ~= "/configuration/backends" then + ngx.status = ngx.HTTP_NOT_FOUND + ngx.print("Not found!") + return + end + + if ngx.var.request_method == "GET" then + ngx.status = ngx.HTTP_OK + ngx.print(_M.get_backends_data()) + return + end + + local backends = fetch_request_body() + if not backends then + ngx.log(ngx.ERR, "dynamic-configuration: unable to read valid request body") + ngx.status = ngx.HTTP_BAD_REQUEST + return + end + + local success, err = configuration_data:set("backends", backends) + if not success then + ngx.log(ngx.ERR, "dynamic-configuration: error updating configuration: " .. tostring(err)) + ngx.status = ngx.HTTP_BAD_REQUEST + return + end + + ngx.status = ngx.HTTP_CREATED +end + +if _TEST then + _M.handle_servers = handle_servers +end + +return _M diff --git a/rootfs/etc/nginx/lua/lua_ingress.lua b/rootfs/etc/nginx/lua/lua_ingress.lua new file mode 100644 index 0000000000..bee1d3cbaf --- /dev/null +++ b/rootfs/etc/nginx/lua/lua_ingress.lua @@ -0,0 +1,133 @@ +local ngx_re_split = require("ngx.re").split + +local original_randomseed = math.randomseed +local string_format = string.format +local ngx_redirect = ngx.redirect + +local _M = {} + +local seeds = {} +-- general Nginx configuration passed by controller to be used in this module +local config + +local function get_seed_from_urandom() + local seed + local frandom, err = io.open("/dev/urandom", "rb") + if not frandom then + ngx.log(ngx.WARN, 'failed to open /dev/urandom: ', err) + return nil + end + + local str = frandom:read(4) + frandom:close() + if not str then + ngx.log(ngx.WARN, 'failed to read data from /dev/urandom') + return nil + end + + seed = 0 + for i = 1, 4 do + seed = 256 * seed + str:byte(i) + end + + return seed +end + +math.randomseed = function(seed) + local pid = ngx.worker.pid() + if seeds[pid] then + ngx.log(ngx.WARN, + string.format("ignoring math.randomseed(%d) since PRNG is already seeded for worker %d", seed, pid)) + return + end + + original_randomseed(seed) + seeds[pid] = seed +end + +local function randomseed() + local seed = get_seed_from_urandom() + if not seed then + ngx.log(ngx.WARN, 'failed to get seed from urandom') + seed = ngx.now() * 1000 + ngx.worker.pid() + end + math.randomseed(seed) +end + +local function redirect_to_https() + return ngx.var.pass_access_scheme == "http" and (ngx.var.scheme == "http" or ngx.var.scheme == "https") +end + +local function 
redirect_host() + local host_port, err = ngx_re_split(ngx.var.best_http_host, ":") + if err then + ngx.log(ngx.ERR, "could not parse variable: ", err) + return ngx.var.best_http_host; + end + + return host_port[1]; +end + +local function parse_x_forwarded_host() + local hosts, err = ngx_re_split(ngx.var.http_x_forwarded_host, ",") + if err then + ngx.log(ngx.ERR, string_format("could not parse variable: %s", err)) + return "" + end + + return hosts[1] +end + +function _M.init_worker() + randomseed() +end + +function _M.set_config(new_config) + config = new_config +end + +-- rewrite gets called in every location context. +-- This is where we do variable assignments to be used in subsequent +-- phases or redirection +function _M.rewrite(location_config) + ngx.var.pass_access_scheme = ngx.var.scheme + ngx.var.pass_server_port = ngx.var.server_port + ngx.var.best_http_host = ngx.var.http_host or ngx.var.host + + if config.use_forwarded_headers then + -- trust http_x_forwarded_proto headers correctly indicate ssl offloading + if ngx.var.http_x_forwarded_proto then + ngx.var.pass_access_scheme = ngx.var.http_x_forwarded_proto + end + + if ngx.var.http_x_forwarded_port then + ngx.var.pass_server_port = ngx.var.http_x_forwarded_port + end + + -- Obtain best http host + if ngx.var.http_x_forwarded_host then + ngx.var.best_http_host = parse_x_forwarded_host() + end + end + + ngx.var.pass_port = ngx.var.pass_server_port + if config.is_ssl_passthrough_enabled then + if ngx.var.pass_server_port == config.listen_ports.ssl_proxy then + ngx.var.pass_port = 443 + end + elseif ngx.var.pass_server_port == config.listen_ports.https then + ngx.var.pass_port = 443 + end + + if location_config.force_ssl_redirect and redirect_to_https() then + local uri = string_format("https://%s%s", redirect_host(), ngx.var.request_uri) + + if location_config.use_port_in_redirects then + uri = string_format("https://%s:%s%s", redirect_host(), config.listen_ports.https, ngx.var.request_uri) + end + + ngx_redirect(uri, config.http_redirect_code) + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/monitor.lua b/rootfs/etc/nginx/lua/monitor.lua new file mode 100644 index 0000000000..014113b44d --- /dev/null +++ b/rootfs/etc/nginx/lua/monitor.lua @@ -0,0 +1,88 @@ +local socket = ngx.socket.tcp +local cjson = require("cjson.safe") +local assert = assert +local new_tab = require "table.new" +local clear_tab = require "table.clear" +local clone_tab = require "table.clone" +local nkeys = require "table.nkeys" + +-- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS then it will start dropping metrics +local MAX_BATCH_SIZE = 10000 +local FLUSH_INTERVAL = 1 -- second + +local metrics_batch = new_tab(MAX_BATCH_SIZE, 0) + +local _M = {} + +local function send(payload) + local s = assert(socket()) + assert(s:connect("unix:/tmp/prometheus-nginx.socket")) + assert(s:send(payload)) + assert(s:close()) +end + +local function metrics() + return { + host = ngx.var.host or "-", + namespace = ngx.var.namespace or "-", + ingress = ngx.var.ingress_name or "-", + service = ngx.var.service_name or "-", + path = ngx.var.location_path or "-", + + method = ngx.var.request_method or "-", + status = ngx.var.status or "-", + requestLength = tonumber(ngx.var.request_length) or -1, + requestTime = tonumber(ngx.var.request_time) or -1, + responseLength = tonumber(ngx.var.bytes_sent) or -1, + + upstreamLatency = tonumber(ngx.var.upstream_connect_time) or -1, + upstreamResponseTime = tonumber(ngx.var.upstream_response_time) or -1, + 
upstreamResponseLength = tonumber(ngx.var.upstream_response_length) or -1, + --upstreamStatus = ngx.var.upstream_status or "-", + } +end + +local function flush(premature) + if premature then + return + end + + if #metrics_batch == 0 then + return + end + + local current_metrics_batch = clone_tab(metrics_batch) + clear_tab(metrics_batch) + + local payload, err = cjson.encode(current_metrics_batch) + if not payload then + ngx.log(ngx.ERR, "error while encoding metrics: ", err) + return + end + + send(payload) +end + +function _M.init_worker() + local _, err = ngx.timer.every(FLUSH_INTERVAL, flush) + if err then + ngx.log(ngx.ERR, string.format("error when setting up timer.every: %s", tostring(err))) + end +end + +function _M.call() + local metrics_size = nkeys(metrics_batch) + if metrics_size >= MAX_BATCH_SIZE then + ngx.log(ngx.WARN, "omitting metrics for the request, current batch is full") + return + end + + metrics_batch[metrics_size + 1] = metrics() +end + +if _TEST then + _M.flush = flush + _M.get_metrics_batch = function() return metrics_batch end +end + +return _M diff --git a/rootfs/etc/nginx/lua/plugins.lua b/rootfs/etc/nginx/lua/plugins.lua new file mode 100644 index 0000000000..d634bc2f4d --- /dev/null +++ b/rootfs/etc/nginx/lua/plugins.lua @@ -0,0 +1,48 @@ +local string_format = string.format +local new_tab = require "table.new" +local ngx_log = ngx.log +local INFO = ngx.INFO +local ERR = ngx.ERR + +local _M = {} +local MAX_NUMBER_OF_PLUGINS = 10000 +-- TODO: is this good for a dictionary? +local plugins = new_tab(MAX_NUMBER_OF_PLUGINS, 0) + +local function load_plugin(name) + local path = string_format("plugins.%s.main", name) + + local ok, plugin = pcall(require, path) + if not ok then + ngx_log(ERR, string_format("error loading plugin \"%s\": %s", path, plugin)) + return + end + + plugins[name] = plugin +end + +function _M.init(names) + for _, name in ipairs(names) do + load_plugin(name) + end +end + +function _M.run() + local phase = ngx.get_phase() + + for name, plugin in pairs(plugins) do + if plugin[phase] then + ngx_log(INFO, string_format("running plugin \"%s\" in phase \"%s\"", name, phase)) + + -- TODO: consider sandboxing this, should we? 
+ -- probably yes; at least prohibit plugins from accessing env vars etc. + -- but since the plugins are going to be installed by the ingress-nginx operator they can also be assumed to be safe + local ok, err = pcall(plugin[phase]) + if not ok then + ngx_log(ERR, string_format("error while running plugin \"%s\" in phase \"%s\": %s", name, phase, err)) + end + end + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/plugins/hello_world/main.lua b/rootfs/etc/nginx/lua/plugins/hello_world/main.lua new file mode 100644 index 0000000000..799daa33e0 --- /dev/null +++ b/rootfs/etc/nginx/lua/plugins/hello_world/main.lua @@ -0,0 +1,15 @@ +local _M = {} + +function _M.rewrite() + ngx.req.set_header("x-hello-world", "1") +end + +function _M.access() + local ua = ngx.var.http_user_agent + + if ua == "hello" then + ngx.exit(403) + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/tcp_udp_balancer.lua b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua new file mode 100644 index 0000000000..7d9601ca8b --- /dev/null +++ b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua @@ -0,0 +1,177 @@ +local ngx_balancer = require("ngx.balancer") +local cjson = require("cjson.safe") +local util = require("util") +local dns_util = require("util.dns") +local configuration = require("tcp_udp_configuration") +local round_robin = require("balancer.round_robin") + +-- measured in seconds +-- for an Nginx worker to pick up the new list of upstream peers +-- it will take (the delay until the controller POSTs the new backend list to the Nginx endpoint) + BACKENDS_SYNC_INTERVAL +local BACKENDS_SYNC_INTERVAL = 1 + +local DEFAULT_LB_ALG = "round_robin" +local IMPLEMENTATIONS = { + round_robin = round_robin +} + +local _M = {} +local balancers = {} + +local function get_implementation(backend) + local name = backend["load-balance"] or DEFAULT_LB_ALG + + local implementation = IMPLEMENTATIONS[name] + if not implementation then + ngx.log(ngx.WARN, string.format("%s is not supported, falling back to %s", backend["load-balance"], DEFAULT_LB_ALG)) + implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG] + end + + return implementation +end + +local function resolve_external_names(original_backend) + local backend = util.deepcopy(original_backend) + local endpoints = {} + for _, endpoint in ipairs(backend.endpoints) do + local ips = dns_util.resolve(endpoint.address) + for _, ip in ipairs(ips) do + table.insert(endpoints, {address = ip, port = endpoint.port}) + end + end + backend.endpoints = endpoints + return backend +end + +local function format_ipv6_endpoints(endpoints) + local formatted_endpoints = {} + for _, endpoint in ipairs(endpoints) do + local formatted_endpoint = endpoint + if not endpoint.address:match("^%d+%.%d+%.%d+%.%d+$") then + formatted_endpoint.address = string.format("[%s]", endpoint.address) + end + table.insert(formatted_endpoints, formatted_endpoint) + end + return formatted_endpoints +end + +local function sync_backend(backend) + if not backend.endpoints or #backend.endpoints == 0 then + ngx.log(ngx.INFO, string.format("there is no endpoint for backend %s. Skipping...", backend.name)) + return + end + + ngx.log(ngx.INFO, string.format("syncing backend %s", backend.name)) + local implementation = get_implementation(backend) + local balancer = balancers[backend.name] + + if not balancer then + balancers[backend.name] = implementation:new(backend) + return + end + + -- every implementation is the metatable of its instances (see .new(...)
functions) + -- here we check if `balancer` is the instance of `implementation` + -- if it is not then we deduce LB algorithm has changed for the backend + if getmetatable(balancer) ~= implementation then + ngx.log( + ngx.INFO, + string.format("LB algorithm changed from %s to %s, resetting the instance", balancer.name, implementation.name) + ) + balancers[backend.name] = implementation:new(backend) + return + end + + local service_type = backend.service and backend.service.spec and backend.service.spec["type"] + if service_type == "ExternalName" then + backend = resolve_external_names(backend) + end + + backend.endpoints = format_ipv6_endpoints(backend.endpoints) + + balancer:sync(backend) +end + +local function sync_backends() + local backends_data = configuration.get_backends_data() + if not backends_data then + balancers = {} + return + end + + local new_backends, err = cjson.decode(backends_data) + if not new_backends then + ngx.log(ngx.ERR, "could not parse backends data: ", err) + return + end + + local balancers_to_keep = {} + for _, new_backend in ipairs(new_backends) do + sync_backend(new_backend) + balancers_to_keep[new_backend.name] = balancers[new_backend.name] + end + + for backend_name, _ in pairs(balancers) do + if not balancers_to_keep[backend_name] then + balancers[backend_name] = nil + end + end +end + +local function get_balancer() + local backend_name = ngx.var.proxy_upstream_name + local balancer = balancers[backend_name] + if not balancer then + return + end + + return balancer +end + +function _M.init_worker() + sync_backends() -- when worker starts, sync backends without delay + local _, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends) + if err then + ngx.log(ngx.ERR, string.format("error when setting up timer.every for sync_backends: %s", tostring(err))) + end +end + +function _M.balance() + local balancer = get_balancer() + if not balancer then + return + end + + local peer = balancer:balance() + if not peer then + ngx.log(ngx.WARN, "no peer was returned, balancer: " .. 
balancer.name) + return + end + + ngx_balancer.set_more_tries(1) + + local ok, err = ngx_balancer.set_current_peer(peer) + if not ok then + ngx.log(ngx.ERR, string.format("error while setting current upstream peer %s: %s", peer, err)) + end +end + +function _M.log() + local balancer = get_balancer() + if not balancer then + return + end + + if not balancer.after_balance then + return + end + + balancer:after_balance() +end + +if _TEST then + _M.get_implementation = get_implementation + _M.sync_backend = sync_backend +end + +return _M diff --git a/rootfs/etc/nginx/lua/tcp_udp_configuration.lua b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua new file mode 100644 index 0000000000..902ac59b61 --- /dev/null +++ b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua @@ -0,0 +1,38 @@ +-- this is the Lua representation of TCP/UDP Configuration +local tcp_udp_configuration_data = ngx.shared.tcp_udp_configuration_data + +local _M = {} + +function _M.get_backends_data() + return tcp_udp_configuration_data:get("backends") +end + +function _M.call() + local sock, err = ngx.req.socket(true) + if not sock then + ngx.log(ngx.ERR, "failed to get raw req socket: ", err) + ngx.say("error: ", err) + return + end + + local reader = sock:receiveuntil("\r\n") + local backends, err_read = reader() + if not backends then + ngx.log(ngx.ERR, "failed TCP/UDP dynamic-configuration:", err_read) + ngx.say("error: ", err_read) + return + end + + if backends == nil or backends == "" then + return + end + + local success, err_conf = tcp_udp_configuration_data:set("backends", backends) + if not success then + ngx.log(ngx.ERR, "dynamic-configuration: error updating configuration: " .. tostring(err_conf)) + ngx.say("error: ", err_conf) + return + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/test/balancer/chash_test.lua b/rootfs/etc/nginx/lua/test/balancer/chash_test.lua new file mode 100644 index 0000000000..d71dfc56ab --- /dev/null +++ b/rootfs/etc/nginx/lua/test/balancer/chash_test.lua @@ -0,0 +1,28 @@ +describe("Balancer chash", function() + local balancer_chash = require("balancer.chash") + + describe("balance()", function() + it("uses correct key for given backend", function() + _G.ngx = { var = { request_uri = "/alma/armud" }} + + local resty_chash = package.loaded["resty.chash"] + resty_chash.new = function(self, nodes) + return { + find = function(self, key) + assert.equal("/alma/armud", key) + return "10.184.7.40:8080" + end + } + end + + local backend = { + name = "my-dummy-backend", upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri" }, + endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } } + } + local instance = balancer_chash:new(backend) + + local peer = instance:balance() + assert.equal("10.184.7.40:8080", peer) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua b/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua new file mode 100644 index 0000000000..6bbd582dd3 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua @@ -0,0 +1,82 @@ + +local function get_test_backend(n_endpoints) + local backend = { + name = "my-dummy-backend", + ["upstreamHashByConfig"] = { + ["upstream-hash-by"] = "$request_uri", + ["upstream-hash-by-subset"] = true, + ["upstream-hash-by-subset-size"] = 3 + }, + endpoints = {} + } + + for i = 1, n_endpoints do + backend.endpoints[i] = { address = "10.184.7." .. 
tostring(i), port = "8080", maxFails = 0, failTimeout = 0 } + end + + return backend +end + +describe("Balancer chash subset", function() + local balancer_chashsubset = require("balancer.chashsubset") + + describe("balance()", function() + it("returns peers from the same subset", function() + _G.ngx = { var = { request_uri = "/alma/armud" }} + + local backend = get_test_backend(9) + + local instance = balancer_chashsubset:new(backend) + + instance:sync(backend) + + local first_node = instance:balance() + local subset_id + local endpoint_strings + + local function has_value(tab, val) + for _, value in ipairs(tab) do + if value == val then + return true + end + end + + return false + end + + for id, endpoints in pairs(instance["subsets"]) do + endpoint_strings = {} + for _, endpoint in pairs(endpoints) do + local endpoint_string = endpoint.address .. ":" .. endpoint.port + table.insert(endpoint_strings, endpoint_string) + if first_node == endpoint_string then + -- found the set of first_node + subset_id = id + end + end + if subset_id then + break + end + end + + -- multiple calls to balance must return nodes from the same subset + for _ = 0, 10 do + assert.True(has_value(endpoint_strings, instance:balance())) + end + end) + end) + describe("new(backend)", function() + it("fills last subset correctly", function() + _G.ngx = { var = { request_uri = "/alma/armud" }} + + local backend = get_test_backend(7) + + local instance = balancer_chashsubset:new(backend) + + instance:sync(backend) + for _, endpoints in pairs(instance["subsets"]) do + assert.are.equal(#endpoints, 3) + end + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua b/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua new file mode 100644 index 0000000000..42d69f580d --- /dev/null +++ b/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua @@ -0,0 +1,91 @@ +local util = require("util") + +describe("Balancer ewma", function() + local balancer_ewma = require("balancer.ewma") + + describe("after_balance()", function() + local ngx_now = 1543238266 + _G.ngx.now = function() return ngx_now end + _G.ngx.var = { upstream_response_time = "0.25", upstream_connect_time = "0.02", upstream_addr = "10.184.7.40:8080" } + + it("updates EWMA stats", function() + local backend = { + name = "my-dummy-backend", ["load-balance"] = "ewma", + endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } } + } + local instance = balancer_ewma:new(backend) + + instance:after_balance() + assert.equal(0.27, instance.ewma[ngx.var.upstream_addr]) + assert.equal(ngx_now, instance.ewma_last_touched_at[ngx.var.upstream_addr]) + end) + end) + + describe("balance()", function() + it("returns single endpoint when the given backend has only one endpoint", function() + local backend = { + name = "my-dummy-backend", ["load-balance"] = "ewma", + endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } } + } + local instance = balancer_ewma:new(backend) + + local peer = instance:balance() + assert.equal("10.184.7.40:8080", peer) + end) + + it("picks the endpoint with lowest score when there are two of them", function() + local backend = { + name = "my-dummy-backend", ["load-balance"] = "ewma", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.97.100", port = "8080", maxFails = 0, failTimeout = 0 }, + } + } + local instance = balancer_ewma:new(backend) + instance.ewma = { ["10.184.7.40:8080"] = 0.5, ["10.184.97.100:8080"] = 0.3 } +
instance.ewma_last_touched_at = { ["10.184.7.40:8080"] = ngx.now(), ["10.184.97.100:8080"] = ngx.now() } + + local peer = instance:balance() + assert.equal("10.184.97.100:8080", peer) + end) + end) + + describe("sync()", function() + local backend, instance + + before_each(function() + backend = { + name = "my-dummy-backend", ["load-balance"] = "ewma", + endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } } + } + instance = balancer_ewma:new(backend) + end) + + it("does nothing when endpoints do not change", function() + local new_backend = { + endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } } + } + + instance:sync(new_backend) + end) + + it("updates endpoints", function() + local new_backend = { + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.97.100", port = "8080", maxFails = 0, failTimeout = 0 }, + } + } + + instance:sync(new_backend) + assert.are.same(new_backend.endpoints, instance.peers) + end) + + it("resets stats", function() + local new_backend = util.deepcopy(backend) + new_backend.endpoints[1].maxFails = 3 + + instance:sync(new_backend) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua new file mode 100644 index 0000000000..b0e0cc30e4 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -0,0 +1,265 @@ +local sticky = require("balancer.sticky") +local cookie = require("resty.cookie") +local util = require("util") + +local original_ngx = ngx + +function mock_ngx(mock) + local _ngx = mock + setmetatable(_ngx, {__index = _G.ngx}) + _G.ngx = _ngx +end + +local function reset_ngx() + _G.ngx = original_ngx +end + +function get_mocked_cookie_new() + return function(self) + return { + get = function(self, n) return nil, "error" end, + set = function(self, n) return true, "" end + } + end +end + +cookie.new = get_mocked_cookie_new() + +local function get_test_backend() + return { + name = "access-router-production-web-80", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + }, + sessionAffinityConfig = { + name = "cookie", + cookieSessionAffinity = { name = "test_name", hash = "sha1" } + }, + } +end + +describe("Sticky", function() + before_each(function() + mock_ngx({ var = { location_path = "/", host = "test.com" } }) + end) + + after_each(function() + reset_ngx() + end) + + local test_backend = get_test_backend() + local test_backend_endpoint= test_backend.endpoints[1].address .. ":" .. 
test_backend.endpoints[1].port + + describe("new(backend)", function() + context("when backend specifies cookie name", function() + it("returns an instance containing the corresponding cookie name", function() + local sticky_balancer_instance = sticky:new(test_backend) + local test_backend_cookie_name = test_backend.sessionAffinityConfig.cookieSessionAffinity.name + assert.equal(sticky_balancer_instance:cookie_name(), test_backend_cookie_name) + end) + end) + + context("when backend does not specify cookie name", function() + it("returns an instance with 'route' as cookie name", function() + local temp_backend = util.deepcopy(test_backend) + temp_backend.sessionAffinityConfig.cookieSessionAffinity.name = nil + local sticky_balancer_instance = sticky:new(temp_backend) + local default_cookie_name = "route" + assert.equal(sticky_balancer_instance:cookie_name(), default_cookie_name) + end) + end) + end) + + describe("balance()", function() + local mocked_cookie_new = cookie.new + + before_each(function() + package.loaded["balancer.sticky"] = nil + sticky = require("balancer.sticky") + end) + + after_each(function() + cookie.new = mocked_cookie_new + end) + + context("when client doesn't have a cookie set and location is in cookie_locations", function() + it("picks an endpoint for the client", function() + local sticky_balancer_instance = sticky:new(test_backend) + local peer = sticky_balancer_instance:balance() + assert.equal(peer, test_backend_endpoint) + end) + + it("sets a cookie on the client", function() + local s = {} + cookie.new = function(self) + local cookie_instance = { + set = function(self, payload) + assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name) + assert.equal(payload.path, ngx.var.location_path) + assert.equal(payload.domain, nil) + assert.equal(payload.httponly, true) + assert.equal(payload.secure, false) + return true, nil + end, + get = function(k) return false end, + } + s = spy.on(cookie_instance, "set") + return cookie_instance, false + end + local b = get_test_backend() + b.sessionAffinityConfig.cookieSessionAffinity.locations = {} + b.sessionAffinityConfig.cookieSessionAffinity.locations["test.com"] = {"/"} + local sticky_balancer_instance = sticky:new(b) + assert.has_no.errors(function() sticky_balancer_instance:balance() end) + assert.spy(s).was_called() + end) + + it("sets a secure cookie on the client when being in ssl mode", function() + ngx.var.https = "on" + local s = {} + cookie.new = function(self) + local cookie_instance = { + set = function(self, payload) + assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name) + assert.equal(payload.path, ngx.var.location_path) + assert.equal(payload.domain, nil) + assert.equal(payload.httponly, true) + assert.equal(payload.secure, true) + return true, nil + end, + get = function(k) return false end, + } + s = spy.on(cookie_instance, "set") + return cookie_instance, false + end + local b = get_test_backend() + b.sessionAffinityConfig.cookieSessionAffinity.locations = {} + b.sessionAffinityConfig.cookieSessionAffinity.locations["test.com"] = {"/"} + local sticky_balancer_instance = sticky:new(b) + assert.has_no.errors(function() sticky_balancer_instance:balance() end) + assert.spy(s).was_called() + end) + end) + + context("when client doesn't have a cookie set and location not in cookie_locations", function() + it("picks an endpoint for the client", function() + local sticky_balancer_instance = sticky:new(test_backend) + local peer = 
sticky_balancer_instance:balance() + assert.equal(peer, test_backend_endpoint) + end) + + it("does not set a cookie on the client", function() + local s = {} + cookie.new = function(self) + local cookie_instance = { + set = function(self, payload) + assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name) + assert.equal(payload.path, ngx.var.location_path) + assert.equal(payload.domain, ngx.var.host) + assert.equal(payload.httponly, true) + return true, nil + end, + get = function(k) return false end, + } + s = spy.on(cookie_instance, "set") + return cookie_instance, false + end + local sticky_balancer_instance = sticky:new(get_test_backend()) + assert.has_no.errors(function() sticky_balancer_instance:balance() end) + assert.spy(s).was_not_called() + end) + end) + + context("when client has a cookie set", function() + it("does not set a cookie", function() + local s = {} + cookie.new = function(self) + local return_obj = { + set = function(v) return false, nil end, + get = function(k) return test_backend_endpoint end, + } + s = spy.on(return_obj, "set") + return return_obj, false + end + local sticky_balancer_instance = sticky:new(test_backend) + assert.has_no.errors(function() sticky_balancer_instance:balance() end) + assert.spy(s).was_not_called() + end) + + it("returns the correct endpoint for the client", function() + local sticky_balancer_instance = sticky:new(test_backend) + local peer = sticky_balancer_instance:balance() + assert.equal(peer, test_backend_endpoint) + end) + end) + end) + + local function get_several_test_backends(change_on_failure) + return { + name = "access-router-production-web-80", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.7.41", port = "8080", maxFails = 0, failTimeout = 0 }, + }, + sessionAffinityConfig = { + name = "cookie", + cookieSessionAffinity = { name = "test_name", hash = "sha1", change_on_failure = change_on_failure } + }, + } + end + + describe("balance() after error", function() + local mocked_cookie_new = cookie.new + + before_each(function() + package.loaded["balancer.sticky"] = nil + sticky = require("balancer.sticky") + end) + + after_each(function() + cookie.new = mocked_cookie_new + end) + + context("when request to upstream fails", function() + it("changes upstream when change_on_failure option is true", function() + -- create sticky cookie + cookie.new = function(self) + local return_obj = { + set = function(v) return false, nil end, + get = function(k) return "" end, + } + return return_obj, false + end + + local options = {false, true} + + for _, option in ipairs(options) do + local sticky_balancer_instance = sticky:new(get_several_test_backends(option)) + + local old_upstream = sticky_balancer_instance:balance() + for _ = 1, 100 do + -- make sure upstream doesn't change on subsequent calls of balance() + assert.equal(old_upstream, sticky_balancer_instance:balance()) + end + + -- simulate request failure + sticky_balancer_instance.get_last_failure = function() + return "failed" + end + _G.ngx.var = { upstream_addr = old_upstream } + + for _ = 1, 100 do + local new_upstream = sticky_balancer_instance:balance() + if option == false then + -- upstream should be the same in spite of the error, if change_on_failure option is false + assert.equal(new_upstream, old_upstream) + else + -- upstream should change after the error, if change_on_failure option is true + assert.not_equal(new_upstream, old_upstream) + end + end + end + end) + end) + end) +end)
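Both the spec above and the balancer spec that follows re-implement the same `ngx` mocking helpers inline. For readers unfamiliar with the pattern, here is a minimal standalone sketch of it (a hypothetical shared helper module; it assumes only the busted framework and the stock OpenResty `ngx` global that these specs already rely on):

local original_ngx = ngx

-- Replace the global `ngx` with a partial mock; any field the test does not
-- override falls back to the real OpenResty API via the __index metamethod.
local function mock_ngx(mock)
  setmetatable(mock, { __index = original_ngx })
  _G.ngx = mock
end

-- Restore the real `ngx` between test cases so specs stay isolated.
local function reset_ngx()
  _G.ngx = original_ngx
end

-- Typical busted usage:
--   before_each(function() mock_ngx({ var = { host = "test.com", location_path = "/" } }) end)
--   after_each(reset_ngx)

return { mock_ngx = mock_ngx, reset_ngx = reset_ngx }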
diff --git a/rootfs/etc/nginx/lua/test/balancer_test.lua b/rootfs/etc/nginx/lua/test/balancer_test.lua new file mode 100644 index 0000000000..379e6fcf64 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/balancer_test.lua @@ -0,0 +1,366 @@ +_G._TEST = true + +local balancer, expected_implementations, backends +local original_ngx = ngx + +local function reset_ngx() + _G.ngx = original_ngx +end + +local function mock_ngx(mock) + local _ngx = mock + setmetatable(_ngx, { __index = ngx }) + _G.ngx = _ngx +end + +local function reset_balancer() + package.loaded["balancer"] = nil + balancer = require("balancer") +end + +local function reset_expected_implementations() + expected_implementations = { + ["access-router-production-web-80"] = package.loaded["balancer.round_robin"], + ["my-dummy-app-1"] = package.loaded["balancer.round_robin"], + ["my-dummy-app-2"] = package.loaded["balancer.chash"], + ["my-dummy-app-3"] = package.loaded["balancer.sticky"], + ["my-dummy-app-4"] = package.loaded["balancer.ewma"], + ["my-dummy-app-5"] = package.loaded["balancer.sticky"], + } +end + +local function reset_backends() + backends = { + { + name = "access-router-production-web-80", port = "80", secure = false, + secureCACert = { secret = "", caFilename = "", pemSha = "" }, + sslPassthrough = false, + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.97.100", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.98.239", port = "8080", maxFails = 0, failTimeout = 0 }, + }, + sessionAffinityConfig = { name = "", cookieSessionAffinity = { name = "" } }, + trafficShapingPolicy = { + weight = 0, + header = "", + headerValue = "", + cookie = "" + }, + }, + { name = "my-dummy-app-1", ["load-balance"] = "round_robin", }, + { + name = "my-dummy-app-2", ["load-balance"] = "chash", + upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri", }, + }, + { + name = "my-dummy-app-3", ["load-balance"] = "ewma", + sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } } + }, + { name = "my-dummy-app-4", ["load-balance"] = "ewma", }, + { + name = "my-dummy-app-5", ["load-balance"] = "ewma", ["upstream-hash-by"] = "$request_uri", + sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } } + }, + } +end + +describe("Balancer", function() + before_each(function() + reset_balancer() + reset_expected_implementations() + reset_backends() + end) + + after_each(function() + reset_ngx() + end) + + describe("get_implementation()", function() + it("returns correct implementation for given backend", function() + for _, backend in pairs(backends) do + local expected_implementation = expected_implementations[backend.name] + local implementation = balancer.get_implementation(backend) + assert.equal(expected_implementation, balancer.get_implementation(backend)) + end + end) + end) + + describe("route_to_alternative_balancer()", function() + local backend, _balancer + + before_each(function() + backend = backends[1] + _balancer = { + alternative_backends = { + backend.name, + } + } + mock_ngx({ var = { request_uri = "/" } }) + end) + + it("returns false when no trafficShapingPolicy is set", function() + balancer.sync_backend(backend) + assert.equal(false, balancer.route_to_alternative_balancer(_balancer)) + end) + + it("returns false when no alternative backends is set", function() + backend.trafficShapingPolicy.weight = 100 + balancer.sync_backend(backend) + _balancer.alternative_backends = nil + 
assert.equal(false, balancer.route_to_alternative_balancer(_balancer)) + end) + + it("returns false when alternative backends name does not match", function() + backend.trafficShapingPolicy.weight = 100 + balancer.sync_backend(backend) + _balancer.alternative_backends[1] = "nonExistingBackend" + assert.equal(false, balancer.route_to_alternative_balancer(_balancer)) + end) + + context("canary by weight", function() + it("returns true when weight is 100", function() + backend.trafficShapingPolicy.weight = 100 + balancer.sync_backend(backend) + assert.equal(true, balancer.route_to_alternative_balancer(_balancer)) + end) + + it("returns false when weight is 0", function() + backend.trafficShapingPolicy.weight = 0 + balancer.sync_backend(backend) + assert.equal(false, balancer.route_to_alternative_balancer(_balancer)) + end) + end) + + context("canary by cookie", function() + it("returns correct result for given cookies", function() + backend.trafficShapingPolicy.cookie = "canaryCookie" + balancer.sync_backend(backend) + local test_patterns = { + { + case_title = "cookie_value is 'always'", + request_cookie_name = "canaryCookie", + request_cookie_value = "always", + expected_result = true, + }, + { + case_title = "cookie_value is 'never'", + request_cookie_name = "canaryCookie", + request_cookie_value = "never", + expected_result = false, + }, + { + case_title = "cookie_value is undefined", + request_cookie_name = "canaryCookie", + request_cookie_value = "foo", + expected_result = false, + }, + { + case_title = "cookie_name is undefined", + request_cookie_name = "foo", + request_cookie_value = "always", + expected_result = false + }, + } + for _, test_pattern in pairs(test_patterns) do + mock_ngx({ var = { + ["cookie_" .. test_pattern.request_cookie_name] = test_pattern.request_cookie_value, + request_uri = "/" + }}) + assert.message("\nTest data pattern: " .. 
test_pattern.case_title) + .equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_balancer)) + reset_ngx() + end + end) + end) + + context("canary by header", function() + it("returns correct result for given headers", function() + local test_patterns = { + -- with no header value setting + { + case_title = "no custom header value and header value is 'always'", + header_name = "canaryHeader", + header_value = "", + request_header_name = "canaryHeader", + request_header_value = "always", + expected_result = true, + }, + { + case_title = "no custom header value and header value is 'never'", + header_name = "canaryHeader", + header_value = "", + request_header_name = "canaryHeader", + request_header_value = "never", + expected_result = false, + }, + { + case_title = "no custom header value and header value is undefined", + header_name = "canaryHeader", + header_value = "", + request_header_name = "canaryHeader", + request_header_value = "foo", + expected_result = false, + }, + { + case_title = "no custom header value and header name is undefined", + header_name = "canaryHeader", + header_value = "", + request_header_name = "foo", + request_header_value = "always", + expected_result = false, + }, + -- with header value setting + { + case_title = "custom header value is set and header value is 'always'", + header_name = "canaryHeader", + header_value = "foo", + request_header_name = "canaryHeader", + request_header_value = "always", + expected_result = false, + }, + { + case_title = "custom header value is set and header value match custom header value", + header_name = "canaryHeader", + header_value = "foo", + request_header_name = "canaryHeader", + request_header_value = "foo", + expected_result = true, + }, + { + case_title = "custom header value is set and header name is undefined", + header_name = "canaryHeader", + header_value = "foo", + request_header_name = "bar", + request_header_value = "foo", + expected_result = false + }, + } + + for _, test_pattern in pairs(test_patterns) do + reset_balancer() + backend.trafficShapingPolicy.header = test_pattern.header_name + backend.trafficShapingPolicy.headerValue = test_pattern.header_value + balancer.sync_backend(backend) + mock_ngx({ var = { + ["http_" .. test_pattern.request_header_name] = test_pattern.request_header_value, + request_uri = "/" + }}) + assert.message("\nTest data pattern: " .. 
test_pattern.case_title) + .equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_balancer)) + reset_ngx() + end + end) + end) + end) + + describe("sync_backend()", function() + local backend, implementation + + before_each(function() + backend = backends[1] + implementation = expected_implementations[backend.name] + end) + + it("initializes balancer for given backend", function() + local s = spy.on(implementation, "new") + + assert.has_no.errors(function() balancer.sync_backend(backend) end) + assert.spy(s).was_called_with(implementation, backend) + end) + + it("resolves external name to endpoints when service is of type ExternalName", function() + backend = { + name = "example-com", service = { spec = { ["type"] = "ExternalName" } }, + endpoints = { + { address = "example.com", port = "80", maxFails = 0, failTimeout = 0 } + } + } + + local dns_helper = require("test/dns_helper") + dns_helper.mock_dns_query({ + { + name = "example.com", + address = "192.168.1.1", + ttl = 3600, + }, + { + name = "example.com", + address = "1.2.3.4", + ttl = 60, + } + }) + local expected_backend = { + name = "example-com", service = { spec = { ["type"] = "ExternalName" } }, + endpoints = { + { address = "192.168.1.1", port = "80" }, + { address = "1.2.3.4", port = "80" }, + } + } + + local mock_instance = { sync = function(backend) end } + setmetatable(mock_instance, implementation) + implementation.new = function(self, backend) return mock_instance end + assert.has_no.errors(function() balancer.sync_backend(backend) end) + stub(mock_instance, "sync") + assert.has_no.errors(function() balancer.sync_backend(backend) end) + assert.stub(mock_instance.sync).was_called_with(mock_instance, expected_backend) + end) + + it("wraps IPv6 addresses into square brackets", function() + local backend = { + name = "example-com", + endpoints = { + { address = "::1", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "192.168.1.1", port = "8080", maxFails = 0, failTimeout = 0 }, + } + } + local expected_backend = { + name = "example-com", + endpoints = { + { address = "[::1]", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "192.168.1.1", port = "8080", maxFails = 0, failTimeout = 0 }, + } + } + + local mock_instance = { sync = function(backend) end } + setmetatable(mock_instance, implementation) + implementation.new = function(self, backend) return mock_instance end + assert.has_no.errors(function() balancer.sync_backend(backend) end) + stub(mock_instance, "sync") + assert.has_no.errors(function() balancer.sync_backend(backend) end) + assert.stub(mock_instance.sync).was_called_with(mock_instance, expected_backend) + end) + + it("replaces the existing balancer when load balancing config changes for backend", function() + assert.has_no.errors(function() balancer.sync_backend(backend) end) + + backend["load-balance"] = "ewma" + local new_implementation = package.loaded["balancer.ewma"] + + local s_old = spy.on(implementation, "new") + local s = spy.on(new_implementation, "new") + local s_ngx_log = spy.on(ngx, "log") + + assert.has_no.errors(function() balancer.sync_backend(backend) end) + assert.spy(s_ngx_log).was_called_with(ngx.INFO, + "LB algorithm changed from round_robin to ewma, resetting the instance") + -- TODO(elvinefendi) figure out why + -- assert.spy(s).was_called_with(new_implementation, backend) does not work here + assert.spy(s).was_called(1) + assert.spy(s_old).was_not_called() + end) + + it("calls sync(backend) on existing balancer instance when load balancing config
does not change", function() + local mock_instance = { sync = function(...) end } + setmetatable(mock_instance, implementation) + implementation.new = function(self, backend) return mock_instance end + assert.has_no.errors(function() balancer.sync_backend(backend) end) + + stub(mock_instance, "sync") + + assert.has_no.errors(function() balancer.sync_backend(backend) end) + assert.stub(mock_instance.sync).was_called_with(mock_instance, backend) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/certificate_test.lua b/rootfs/etc/nginx/lua/test/certificate_test.lua new file mode 100644 index 0000000000..2de532ad69 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/certificate_test.lua @@ -0,0 +1,105 @@ +local certificate = require("certificate") +local ssl = require("ngx.ssl") + +local function read_file(path) + local file = assert(io.open(path, "rb")) + local content = file:read("*a") + file:close() + return content +end + +local EXAMPLE_CERT = read_file("rootfs/etc/nginx/lua/test/fixtures/example-com-cert.pem") +local DEFAULT_CERT = read_file("rootfs/etc/nginx/lua/test/fixtures/default-cert.pem") +local DEFAULT_CERT_HOSTNAME = "_" + +local function assert_certificate_is_set(cert) + spy.on(ngx, "log") + spy.on(ssl, "set_der_cert") + spy.on(ssl, "set_der_priv_key") + + assert.has_no.errors(certificate.call) + assert.spy(ngx.log).was_not_called_with(ngx.ERR, _) + assert.spy(ssl.set_der_cert).was_called_with(ssl.cert_pem_to_der(cert)) + assert.spy(ssl.set_der_priv_key).was_called_with(ssl.priv_key_pem_to_der(cert)) +end + +local function refute_certificate_is_set() + spy.on(ssl, "set_der_cert") + spy.on(ssl, "set_der_priv_key") + + assert.has_no.errors(certificate.call) + assert.spy(ssl.set_der_cert).was_not_called() + assert.spy(ssl.set_der_priv_key).was_not_called() +end + +local unmocked_ngx = _G.ngx + +describe("Certificate", function() + describe("call", function() + before_each(function() + ssl.server_name = function() return "hostname", nil end + ssl.clear_certs = function() return true, "" end + ssl.set_der_cert = function(cert) return true, "" end + ssl.set_der_priv_key = function(priv_key) return true, "" end + + ngx.exit = function(status) end + + + ngx.shared.certificate_data:set(DEFAULT_CERT_HOSTNAME, DEFAULT_CERT) + end) + + after_each(function() + ngx = unmocked_ngx + ngx.shared.certificate_data:flush_all() + end) + + it("sets certificate and key when hostname is found in dictionary", function() + ngx.shared.certificate_data:set("hostname", EXAMPLE_CERT) + + assert_certificate_is_set(EXAMPLE_CERT) + end) + + it("sets certificate and key for wildcard cert", function() + ssl.server_name = function() return "sub.hostname", nil end + ngx.shared.certificate_data:set("*.hostname", EXAMPLE_CERT) + + assert_certificate_is_set(EXAMPLE_CERT) + end) + + it("sets certificate and key for nested wildcard cert", function() + ssl.server_name = function() return "sub.nested.hostname", nil end + ngx.shared.certificate_data:set("*.nested.hostname", EXAMPLE_CERT) + + assert_certificate_is_set(EXAMPLE_CERT) + end) + + it("logs error message when certificate in dictionary is invalid", function() + ngx.shared.certificate_data:set("hostname", "something invalid") + + spy.on(ngx, "log") + + refute_certificate_is_set() + assert.spy(ngx.log).was_called_with(ngx.ERR, "failed to convert certificate chain from PEM to DER: PEM_read_bio_X509_AUX() failed") + end) + + it("uses default certificate when there's none found for given hostname", function() + assert_certificate_is_set(DEFAULT_CERT) + end) + + 
it("uses default certificate when hostname can not be obtained", function() + ssl.server_name = function() return nil, "crazy hostname error" end + + assert_certificate_is_set(DEFAULT_CERT) + assert.spy(ngx.log).was_called_with(ngx.ERR, "error while obtaining hostname: crazy hostname error") + end) + + it("fails when hostname does not have certificate and default cert is invalid", function() + ngx.shared.certificate_data:set(DEFAULT_CERT_HOSTNAME, "invalid") + + spy.on(ngx, "log") + + refute_certificate_is_set() + assert.spy(ngx.log).was_called_with(ngx.ERR, "failed to convert certificate chain from PEM to DER: PEM_read_bio_X509_AUX() failed") + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/configuration_test.lua b/rootfs/etc/nginx/lua/test/configuration_test.lua new file mode 100644 index 0000000000..64c851b165 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/configuration_test.lua @@ -0,0 +1,262 @@ +_G._TEST = true +local cjson = require("cjson") +local configuration = require("configuration") + +local unmocked_ngx = _G.ngx +local certificate_data = ngx.shared.certificate_data + +function get_backends() + return { + { + name = "my-dummy-backend-1", ["load-balance"] = "sticky", + endpoints = { { address = "10.183.7.40", port = "8080", maxFails = 0, failTimeout = 0 } }, + sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } }, + }, + { + name = "my-dummy-backend-2", ["load-balance"] = "ewma", + endpoints = { + { address = "10.184.7.40", port = "7070", maxFails = 3, failTimeout = 2 }, + { address = "10.184.7.41", port = "7070", maxFails = 2, failTimeout = 1 }, + } + }, + { + name = "my-dummy-backend-3", ["load-balance"] = "round_robin", + endpoints = { + { address = "10.185.7.40", port = "6060", maxFails = 0, failTimeout = 0 }, + { address = "10.185.7.41", port = "6060", maxFails = 2, failTimeout = 1 }, + } + }, + } +end + +function get_mocked_ngx_env() + local _ngx = { + status = ngx.HTTP_OK, + var = {}, + req = { + read_body = function() end, + get_body_data = function() return cjson.encode(get_backends()) end, + get_body_file = function() return nil end, + }, + log = function(msg) end, + } + setmetatable(_ngx, {__index = _G.ngx}) + return _ngx +end + +describe("Configuration", function() + before_each(function() + _G.ngx = get_mocked_ngx_env() + end) + + after_each(function() + _G.ngx = unmocked_ngx + package.loaded["configuration"] = nil + configuration = require("configuration") + end) + + describe("Backends", function() + context("Request method is neither GET nor POST", function() + it("sends 'Only POST and GET requests are allowed!' 
in the response body", function() + ngx.var.request_method = "PUT" + local s = spy.on(ngx, "print") + assert.has_no.errors(configuration.call) + assert.spy(s).was_called_with("Only POST and GET requests are allowed!") + end) + + it("returns a status code of 400", function() + ngx.var.request_method = "PUT" + assert.has_no.errors(configuration.call) + assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST) + end) + end) + + context("GET request to /configuration/backends", function() + before_each(function() + ngx.var.request_method = "GET" + ngx.var.request_uri = "/configuration/backends" + end) + + it("returns the current configured backends on the response body", function() + -- Encoding backends since comparing tables fail due to reference comparison + local encoded_backends = cjson.encode(get_backends()) + ngx.shared.configuration_data:set("backends", encoded_backends) + local s = spy.on(ngx, "print") + assert.has_no.errors(configuration.call) + assert.spy(s).was_called_with(encoded_backends) + end) + + it("returns a status of 200", function() + assert.has_no.errors(configuration.call) + assert.equal(ngx.status, ngx.HTTP_OK) + end) + end) + + context("POST request to /configuration/backends", function() + before_each(function() + ngx.var.request_method = "POST" + ngx.var.request_uri = "/configuration/backends" + end) + + it("stores the posted backends on the shared dictionary", function() + -- Encoding backends since comparing tables fail due to reference comparison + assert.has_no.errors(configuration.call) + assert.equal(ngx.shared.configuration_data:get("backends"), cjson.encode(get_backends())) + end) + + context("Failed to read request body", function() + local mocked_get_body_data = ngx.req.get_body_data + before_each(function() + ngx.req.get_body_data = function() return nil end + end) + + teardown(function() + ngx.req.get_body_data = mocked_get_body_data + end) + + it("returns a status of 400", function() + _G.io.open = function(filename, extension) return false end + assert.has_no.errors(configuration.call) + assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST) + end) + + it("logs 'dynamic-configuration: unable to read valid request body to stderr'", function() + local s = spy.on(ngx, "log") + assert.has_no.errors(configuration.call) + assert.spy(s).was_called_with(ngx.ERR, "dynamic-configuration: unable to read valid request body") + end) + end) + + context("Failed to set the new backends to the configuration dictionary", function() + local resty_configuration_data_set = ngx.shared.configuration_data.set + before_each(function() + ngx.shared.configuration_data.set = function(key, value) return false, "" end + end) + + teardown(function() + ngx.shared.configuration_data.set = resty_configuration_data_set + end) + + it("returns a status of 400", function() + assert.has_no.errors(configuration.call) + assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST) + end) + + it("logs 'dynamic-configuration: error updating configuration:' to stderr", function() + local s = spy.on(ngx, "log") + assert.has_no.errors(configuration.call) + assert.spy(s).was_called_with(ngx.ERR, "dynamic-configuration: error updating configuration: ") + end) + end) + + context("Succeeded to update backends configuration", function() + it("returns a status of 201", function() + assert.has_no.errors(configuration.call) + assert.equal(ngx.status, ngx.HTTP_CREATED) + end) + end) + end) + end) + + describe("handle_servers()", function() + it("should not accept non POST methods", function() + ngx.var.request_method = "GET" + + local s = 
spy.on(ngx, "print") + assert.has_no.errors(configuration.handle_servers) + assert.spy(s).was_called_with("Only POST requests are allowed!") + assert.same(ngx.status, ngx.HTTP_BAD_REQUEST) + end) + + it("should ignore servers that don't have hostname or pemCertKey set", function() + ngx.var.request_method = "POST" + local mock_servers = cjson.encode({ + { + hostname = "hostname", + sslCert = {} + }, + { + sslCert = { + pemCertKey = "pemCertKey" + } + } + }) + ngx.req.get_body_data = function() return mock_servers end + + local s = spy.on(ngx, "log") + assert.has_no.errors(configuration.handle_servers) + assert.spy(s).was_called_with(ngx.WARN, "hostname or pemCertKey are not present") + assert.same(ngx.status, ngx.HTTP_CREATED) + end) + + it("should successfully update certificates and keys for each host", function() + ngx.var.request_method = "POST" + local mock_servers = cjson.encode({ + { + hostname = "hostname", + sslCert = { + pemCertKey = "pemCertKey" + } + } + }) + ngx.req.get_body_data = function() return mock_servers end + + assert.has_no.errors(configuration.handle_servers) + assert.same(certificate_data:get("hostname"), "pemCertKey") + assert.same(ngx.status, ngx.HTTP_CREATED) + end) + + it("should log an err and set status to Internal Server Error when a certificate cannot be set", function() + ngx.var.request_method = "POST" + ngx.shared.certificate_data.safe_set = function(self, data) return false, "error" end + local mock_servers = cjson.encode({ + { + hostname = "hostname", + sslCert = { + pemCertKey = "pemCertKey" + } + }, + { + hostname = "hostname2", + sslCert = { + pemCertKey = "pemCertKey2" + } + } + }) + ngx.req.get_body_data = function() return mock_servers end + + local s = spy.on(ngx, "log") + assert.has_no.errors(configuration.handle_servers) + assert.spy(s).was_called_with(ngx.ERR, + "error setting certificate for hostname: error\nerror setting certificate for hostname2: error\n") + assert.same(ngx.status, ngx.HTTP_INTERNAL_SERVER_ERROR) + end) + + it("should log an err, set status to Internal Server Error, and short circuit when shared dictionary is full", function() + ngx.var.request_method = "POST" + ngx.shared.certificate_data.safe_set = function(self, data) return false, "no memory" end + local mock_servers = cjson.encode({ + { + hostname = "hostname", + sslCert = { + pemCertKey = "pemCertKey" + } + }, + { + hostname = "hostname2", + sslCert = { + pemCertKey = "pemCertKey2" + } + } + }) + ngx.req.get_body_data = function() return mock_servers end + + local s1 = spy.on(ngx, "log") + local s2 = spy.on(ngx.shared.certificate_data, "safe_set") + assert.has_no.errors(configuration.handle_servers) + assert.spy(s1).was_called_with(ngx.ERR, "no memory in certificate_data dictionary") + assert.spy(s2).was_not_called_with("hostname2", "pemCertKey2") + assert.same(ngx.status, ngx.HTTP_INTERNAL_SERVER_ERROR) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/dns_helper.lua b/rootfs/etc/nginx/lua/test/dns_helper.lua new file mode 100644 index 0000000000..570a60e52a --- /dev/null +++ b/rootfs/etc/nginx/lua/test/dns_helper.lua @@ -0,0 +1,27 @@ +local _M = {} + +local configuration = require("configuration") +local resolver = require("resty.dns.resolver") +local old_resolver_new = resolver.new + +local function reset(nameservers) + configuration.nameservers = nameservers or { "1.1.1.1" } +end + +function _M.mock_new(func, nameservers) + reset(nameservers) + resolver.new = func +end + +function _M.mock_dns_query(response, err) + reset() + resolver.new = function(self, 
options) + local r = old_resolver_new(self, options) + r.query = function(self, name, options, tries) + return response, err + end + return r + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/test/fixtures/default-cert.pem b/rootfs/etc/nginx/lua/test/fixtures/default-cert.pem new file mode 100644 index 0000000000..e8631c85b5 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/fixtures/default-cert.pem @@ -0,0 +1,45 @@ +-----BEGIN CERTIFICATE----- +MIICxDCCAawCCQCjnxUYH38uOjANBgkqhkiG9w0BAQsFADAkMRAwDgYDVQQDDAdk +ZWZhdWx0MRAwDgYDVQQKDAdkZWZhdWx0MB4XDTE5MDQxMzE3NTgwNVoXDTM5MDQw +ODE3NTgwNVowJDEQMA4GA1UEAwwHZGVmYXVsdDEQMA4GA1UECgwHZGVmYXVsdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANuhdV19jJGMRCy7/x6krbHh +GUoN/C2T9a/ZuA8CaBHpDCcCnYCrYjPWjOezuuk5CMWx/IHRgTxbz9z2MXyfSMli +7bkDra0tmahI3Z0ADNxt/QQ30f1I84Y877urO5RJt3W4NmWM9jqbv/AFO8+oWRjI +s9+leQxyvIWtKz524eXGmu0iGD4KkeH6bPCXYotC/t5XH4v9NfHRoZ3M9eaDuKd6 +k54EVol8LUDaBvbicIE8M1Znf1vQWdP8w4nhP739Oc/p5YKcG7jJahLa9nx+AJIe +vxPP9/nQxN1PAcuXK6HAtgF3nkadtW2nd9Ws3bsOn+ZHaE+hQXtMzLZ5/L8BZ50C +AwEAATANBgkqhkiG9w0BAQsFAAOCAQEApWib3ctn/okShC0Krw56vyjqbuKx9KMQ +QuClYR6HTU8D5F9zr2NFyrSMik12wbqPH6VPYRAjVBfFEhzYDaO+DjTJp0wcIe1z +a2fWVjELLg9PEDlB4mVmtJUMkVknwbZ6eD4XRO6ooifSOhg/36KchilbnGchwwaY +Gh4/rNKWqKD5rPVQhUsptNnsZ8trPQ+W3p94rzXyQkWS8KWCD0EeMzdRZnUm/utx +4lDGCdw1GLEfm/SnNR+dyu4ETzY6/s5csChBVZw9xlXzId6QymeGvJe0jcoTnLCG +KNq3F1fTqUXXhP3PTuuNclz0c4/8QZC/l2xH6Xb07H2iOPuuFnDVZA== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDboXVdfYyRjEQs +u/8epK2x4RlKDfwtk/Wv2bgPAmgR6QwnAp2Aq2Iz1ozns7rpOQjFsfyB0YE8W8/c +9jF8n0jJYu25A62tLZmoSN2dAAzcbf0EN9H9SPOGPO+7qzuUSbd1uDZljPY6m7/w +BTvPqFkYyLPfpXkMcryFrSs+duHlxprtIhg+CpHh+mzwl2KLQv7eVx+L/TXx0aGd +zPXmg7inepOeBFaJfC1A2gb24nCBPDNWZ39b0FnT/MOJ4T+9/TnP6eWCnBu4yWoS +2vZ8fgCSHr8Tz/f50MTdTwHLlyuhwLYBd55GnbVtp3fVrN27Dp/mR2hPoUF7TMy2 +efy/AWedAgMBAAECggEBAJ37LLX8CjHjqGJZNDCxmfNajFtVZfDO/inovNmnDH7d +mI0y92JHZRMOoDpGcQszqFi0J4Kl1YU6MXGqcXxIAw5BJ+guei4Yn++JwkcdcyLX +xujS0iyT3f/QM01V5TxMLjfyMsanN7J+t/iJezVqzfPi4mfb2g+XNH4fSvzafLFO +7p9/Mw3J2rB2rV0aJxh8abh0p4bSSPSoQgeubQ6KlwoOJYBZ/a8TmmZqB8DjOPYb +Pad0sTHsQc4q9wrP7zxZmeCDnD0XEluAxX9ZF4Ou/AWBHTRQpu5HH2pUXvm88VI1 +/4QAaxYozxuAknqSnVqpCXjSpYoXAXEX64aroJui/UECgYEA7VdN4jtn62jgVK1V +HHaoZlyfIAzrC7yObyfdlL39NDz4B7emRNvFx7FXgslPRnvlMb/B2mASzrQij7uI +sfsIO7kOJBq6BqnEalCynFj9p5EFQcOehOXYu46Qj1dKp43dptTaxnYnA1xKL9Z5 +DDwrxpD2Z6ur3o6A55qX7M6tLTECgYEA7OW33x3dTX4H5ea8ghhXvrSnqycFhhqE +Grae9HpAUFV5/u6LC14xHk6Cd27cVI/OXAIar1M9aA1FNwnU+NmMgLKyAGgnKVPi +GkDWaAaWKeW32bNHdqg3XmP2TcEXn1PCSwNc4cVPWDfeVQeCtposH0jWITFB9C4O +9sKkfVMCVi0CgYEAzecn0lTnWvupYszdQcxPXD6ObifG4m+6wgQ734bT3DXomAlj +XemsNApOeVBcTjG+LOLHMsSWjG0KbterR30ZL3bkJb5qFM3DcNiBm9I4fN77SIqF +Q5aD6HNORozcX3BcExgmlHZ8chXm5omSimLJN4MbweTVPkcy3brogrDq3IECgYEA +x3Ls6NuS+/BVI/ms0nc+QOCGnfG/k9V1TaxdjgXzae9dRAaAaHTIM/TzoSxkMonU +uuBGqUAS3iz2Dk2n0lAPHDfW58LI3eGy5lmaaoDJIsM2lAJ982fTHhRZRcOBaPIz +DcbqB2eA0wxOkxY8thJ9fWVsawu2tKemj5j2tlESEY0CgYAl3QnNqfkuKKpen7wJ +3LF+rm0Xtw3a9kSE2+dsdkzn3sl2OpX44V1UtSJDJmpsk8OBw20ISgWp6UlnnncG +J0xmjSNaRH0UBfQ7PyntvC7FhaOncP5emrwH80oOjlGyY2i6m9ognLQBo44/XgGq +VwtXclxMu2tvVKKXaXQAwQiNOA== +-----END PRIVATE KEY----- diff --git a/rootfs/etc/nginx/lua/test/fixtures/example-com-cert.pem b/rootfs/etc/nginx/lua/test/fixtures/example-com-cert.pem new file mode 100644 index 0000000000..bc8757a42e --- /dev/null +++ b/rootfs/etc/nginx/lua/test/fixtures/example-com-cert.pem @@ -0,0 +1,45 @@ +-----BEGIN CERTIFICATE----- 
+MIICzDCCAbQCCQD8UB3X6pdyYjANBgkqhkiG9w0BAQsFADAoMRQwEgYDVQQDDAtl +eGFtcGxlLmNvbTEQMA4GA1UECgwHZXhhbXBsZTAeFw0xOTA0MTMxNjU2MzJaFw0z +OTA0MDgxNjU2MzJaMCgxFDASBgNVBAMMC2V4YW1wbGUuY29tMRAwDgYDVQQKDAdl +eGFtcGxlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+IeuF0/KeVZ +wuV54zZ1D747T+EGUmGAJgS7rBmzagSk87tFcFkVFz2Pfh/bytde0YjoYEfb+Nil +LEwCZG1tZsYAxah2sy5BQAWfclQQ5mj+VMn611Eceq5ELVzZeHTIHEqJnNuUyh7V +DCeZeWjT+kwc/NnCn8F1lwVMvm6ZTQ37reyVYKZqkQRWjCst9aTFAlQl6hYLD+LR +cg/b5oOo2SiAtqELaBJDU3lX/zBqG38o0N1hIT364bj6+9vzngx0ce8TMj1y92MJ +YA4r6RUy7NwYc6sfxVjoBr30ARmqsXdYEZwu1DK37fikWCgmqiilBT/AIVjCdb5J +MO+6NhtP6wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCSu0r59BdG0uLmQ/ncLvJJ +vealSX6pbbZInPCCztGa7SJpfkbVNj3c/Fs0q5UH2SPYH/O/yqgp8IWNYNUuTlO3 +2IJWFovi6cHpetszLxBat75XDq3Spvw8mIGd8Lhw2B6RpR5Hy/kO/mXmnpH/1aty +xRJY6V5Tin/bsx3IwWKK/kcXzEtrCAnS2w2V4WTOk7y6WOGhsEfmwVc4MbvXQc/5 +yysvN41AUcWK94XJ2FZZc8ykUkHJ+TeRGq8wnl7l3E9d0wQw+ArL4toD4puFmxvH +qZV5n628d+ecNTbAhanX46A4xdfxhD0LvnURizAfu3N5snMmhjfgL5ukMwrGCwRo +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDf4h64XT8p5VnC +5XnjNnUPvjtP4QZSYYAmBLusGbNqBKTzu0VwWRUXPY9+H9vK117RiOhgR9v42KUs +TAJkbW1mxgDFqHazLkFABZ9yVBDmaP5UyfrXURx6rkQtXNl4dMgcSomc25TKHtUM +J5l5aNP6TBz82cKfwXWXBUy+bplNDfut7JVgpmqRBFaMKy31pMUCVCXqFgsP4tFy +D9vmg6jZKIC2oQtoEkNTeVf/MGobfyjQ3WEhPfrhuPr72/OeDHRx7xMyPXL3Ywlg +DivpFTLs3Bhzqx/FWOgGvfQBGaqxd1gRnC7UMrft+KRYKCaqKKUFP8AhWMJ1vkkw +77o2G0/rAgMBAAECggEBALVyswki8b1H137gsu+WRDu1Jqbvrkr4IH8vmNa7obBM +AVBUN8v9Nt22E+TZdy4nbP6PYh4eP0aodv22wL2Z/m+sDBYmGcXQuCtmIzrqrSPA +dlhLtpPpdhZrxG+rb8lzhHeBZZSOVkGVyX9nXLiMYDjclSXMazNE/MOgFPnF81MB +fQIf1g1FzJKbH5cPrl5hAnxoaRv3SvCCxsCTs51XvweKHmy5X4MlvRAYIj9IKutk +iF2EYTQSY6MSrJWP1buZm0JriJncvT3BdArihNK6OuraxRhc5TUCW7nIx4Pi+hwo +FODwbgtj5AtHmAdiL2AWJnaJoQVPEw6Oq1JBr9i3AGECgYEA9i+0D9dS1vsckQ2G +E1P1ItVkoZBjbSFV6lB8sBsx2hAl6bUIQtJvgoffDlCqkuCl2jagGcmHwlk4V8sc +O2HivNB9TcoQh5L4m8uN6uytLUXw4vUS23YI1LNImAuwf1refEuKVPM+Mn5Y/FMk +n0fK7IfuLgu13WZ6iYkBS+C7RNECgYEA6M7RK9mw/kcquK2bag96S0/0znsYtXtj +naNgsDOfIjuOHJJFinNrVbdW72zqJePXRPtpQ8/5xoyWjysKUqb7I94BXYGPMXzv +Z8fCzSDKTFBODpu4cMvgQk7c4D4ZgQSaWP1+wf9x8WglKowyUeh0CwJ307SYa3Mw +SYPdg2OTJ/sCgYEAsUkbF0lNy6kcIk0l32dXodUgWcTcBOu7rjh2AnAjD1EPrGSE +5XIbgVmNRQbMP2dtqF4sH0Xk8Q1FKNwIoa7VFHnjspAwJSGuzKrisWntMCws05P/ +F3HB3EKbpXrNiHkMvV+8534frUcVl+fb+KQ/uuQMnrYqKp0w4zh5aYYV9fECgYA0 +EAw3AjfSpZeoNSrMTSnMLdVRV7Xu3+knF6JHxUORJEBjo1Jp4+XdBWMrp++1CX7a +rl6cC6aQAGCrI7TrRuxi2QL1JkQfjRD85G9r8ClNZ6gNHEXi87TzHy/F9h09/QmH +XSk7uSSCGAg3u6KFLrbEv4iMj5aGcPwbdKHVAC+ogQKBgQC9vkSFhE2bVrIwul6Y +SEDap+YzbA0yfP8PXd0lX47uyOsd3hD0OoGwTRsmeJr1/4Yuf0Wub838ZxgP2dpP +qfW7+7OeNyTvS2avxygWvolVV+O5Yx13rE2Dsd7DQnIyGO9yRCWCPjpuOHFqEgMv +HzJX6j3SaubH52pePu5mrzMLcg== +-----END PRIVATE KEY----- diff --git a/rootfs/etc/nginx/lua/test/lua_ingress_test.lua b/rootfs/etc/nginx/lua/test/lua_ingress_test.lua new file mode 100644 index 0000000000..89d3150ef8 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/lua_ingress_test.lua @@ -0,0 +1,9 @@ +describe("lua_ingress", function() + it("patches math.randomseed to not be called more than once per worker", function() + local s = spy.on(ngx, "log") + + math.randomseed(100) + assert.spy(s).was_called_with(ngx.WARN, + string.format("ignoring math.randomseed(%d) since PRNG is already seeded for worker %d", 100, ngx.worker.pid())) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/monitor_test.lua b/rootfs/etc/nginx/lua/test/monitor_test.lua new file mode 100644 index 0000000000..8796d0a62f --- /dev/null +++ 
b/rootfs/etc/nginx/lua/test/monitor_test.lua
@@ -0,0 +1,106 @@
+_G._TEST = true
+
+local original_ngx = ngx
+local function reset_ngx()
+  _G.ngx = original_ngx
+end
+
+local function mock_ngx(mock)
+  local _ngx = mock
+  setmetatable(_ngx, { __index = ngx })
+  _G.ngx = _ngx
+end
+
+local function mock_ngx_socket_tcp()
+  local tcp_mock = {}
+  stub(tcp_mock, "connect", true)
+  stub(tcp_mock, "send", true)
+  stub(tcp_mock, "close", true)
+
+  local socket_mock = {}
+  stub(socket_mock, "tcp", tcp_mock)
+  mock_ngx({ socket = socket_mock })
+
+  return tcp_mock
+end
+
+describe("Monitor", function()
+  after_each(function()
+    reset_ngx()
+    package.loaded["monitor"] = nil
+  end)
+
+  it("batches metrics", function()
+    local monitor = require("monitor")
+    mock_ngx({ var = {} })
+
+    for i = 1,10,1 do
+      monitor.call()
+    end
+
+    assert.equal(10, #monitor.get_metrics_batch())
+  end)
+
+  describe("flush", function()
+    it("short circuits when premature is true (when worker is shutting down)", function()
+      local tcp_mock = mock_ngx_socket_tcp()
+      local monitor = require("monitor")
+      mock_ngx({ var = {} })
+
+      for i = 1,10,1 do
+        monitor.call()
+      end
+      monitor.flush(true)
+      assert.stub(tcp_mock.connect).was_not_called()
+    end)
+
+    it("short circuits when there are no batched metrics", function()
+      local tcp_mock = mock_ngx_socket_tcp()
+      local monitor = require("monitor")
+
+      monitor.flush()
+      assert.stub(tcp_mock.connect).was_not_called()
+    end)
+
+    it("JSON encodes and sends the batched metrics", function()
+      local tcp_mock = mock_ngx_socket_tcp()
+      local monitor = require("monitor")
+
+      local ngx_var_mock = {
+        host = "example.com",
+        namespace = "default",
+        ingress_name = "example",
+        service_name = "http-svc",
+        location_path = "/",
+
+        request_method = "GET",
+        status = "200",
+        request_length = "256",
+        request_time = "0.04",
+        bytes_sent = "512",
+
+        upstream_addr = "10.10.0.1",
+        upstream_connect_time = "0.01",
+        upstream_response_time = "0.02",
+        upstream_response_length = "456",
+        upstream_status = "200",
+      }
+      mock_ngx({ var = ngx_var_mock })
+      monitor.call()
+
+      local ngx_var_mock1 = ngx_var_mock
+      ngx_var_mock1.status = "201"
+      ngx_var_mock1.request_method = "POST"
+      mock_ngx({ var = ngx_var_mock })
+      monitor.call()
+
+      monitor.flush()
+
+      local expected_payload = '[{"requestLength":256,"ingress":"example","status":"200","service":"http-svc","requestTime":0.04,"namespace":"default","host":"example.com","method":"GET","upstreamResponseTime":0.02,"upstreamResponseLength":456,"upstreamLatency":0.01,"path":"\\/","responseLength":512},{"requestLength":256,"ingress":"example","status":"201","service":"http-svc","requestTime":0.04,"namespace":"default","host":"example.com","method":"POST","upstreamResponseTime":0.02,"upstreamResponseLength":456,"upstreamLatency":0.01,"path":"\\/","responseLength":512}]'
+
+      assert.stub(tcp_mock.connect).was_called_with(tcp_mock, "unix:/tmp/prometheus-nginx.socket")
+      assert.stub(tcp_mock.send).was_called_with(tcp_mock, expected_payload)
+      assert.stub(tcp_mock.close).was_called_with(tcp_mock)
+    end)
+  end)
+end)
diff --git a/rootfs/etc/nginx/lua/test/run.lua b/rootfs/etc/nginx/lua/test/run.lua
new file mode 100644
index 0000000000..dcf6e800ea
--- /dev/null
+++ b/rootfs/etc/nginx/lua/test/run.lua
@@ -0,0 +1,38 @@
+local ffi = require("ffi")
+local lua_ingress = require("lua_ingress")
+
+-- without this we get errors such as "attempt to redefine XXX"
+local old_cdef = ffi.cdef
+local exists = {}
+ffi.cdef = function(def)
+  if exists[def] then
+    return
+  end
+  exists[def] = 
true + return old_cdef(def) +end + +local old_udp = ngx.socket.udp +ngx.socket.udp = function(...) + local socket = old_udp(...) + socket.send = function(...) + error("ngx.socket.udp:send please mock this to use in tests") + end + return socket +end + +local old_tcp = ngx.socket.tcp +ngx.socket.tcp = function(...) + local socket = old_tcp(...) + socket.send = function(...) + error("ngx.socket.tcp:send please mock this to use in tests") + end + return socket +end + +ngx.log = function(...) end +ngx.print = function(...) end + +lua_ingress.init_worker() + +require "busted.runner"({ standalone = false }) diff --git a/rootfs/etc/nginx/lua/test/util/dns_test.lua b/rootfs/etc/nginx/lua/test/util/dns_test.lua new file mode 100644 index 0000000000..48e7c6ca31 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/util/dns_test.lua @@ -0,0 +1,74 @@ +describe("resolve", function() + local dns = require("util.dns") + local dns_helper = require("test/dns_helper") + + it("sets correct nameservers", function() + dns_helper.mock_new(function(self, options) + assert.are.same({ nameservers = { "1.2.3.4", "4.5.6.7" }, retrans = 5, timeout = 2000 }, options) + return nil, "" + end, { "1.2.3.4", "4.5.6.7" }) + dns.resolve("example.com") + end) + + it("returns host when an error happens", function() + local s_ngx_log = spy.on(ngx, "log") + + dns_helper.mock_new(function(...) return nil, "an error" end) + assert.are.same({ "example.com" }, dns.resolve("example.com")) + assert.spy(s_ngx_log).was_called_with(ngx.ERR, "failed to instantiate the resolver: an error") + + dns_helper.mock_dns_query(nil, "oops!") + assert.are.same({ "example.com" }, dns.resolve("example.com")) + assert.spy(s_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server:\noops!\noops!") + + dns_helper.mock_dns_query({ errcode = 1, errstr = "format error" }) + assert.are.same({ "example.com" }, dns.resolve("example.com")) + assert.spy(s_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server:\nserver returned error code: 1: format error\nserver returned error code: 1: format error") + + dns_helper.mock_dns_query({}) + assert.are.same({ "example.com" }, dns.resolve("example.com")) + assert.spy(s_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server:\nno record resolved\nno record resolved") + + dns_helper.mock_dns_query({ { name = "example.com", cname = "sub.example.com", ttl = 60 } }) + assert.are.same({ "example.com" }, dns.resolve("example.com")) + assert.spy(s_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server:\nno record resolved\nno record resolved") + end) + + it("resolves all A records of given host, caches them with minimal ttl and returns from cache next time", function() + dns_helper.mock_dns_query({ + { + name = "example.com", + address = "192.168.1.1", + ttl = 3600, + }, + { + name = "example.com", + address = "1.2.3.4", + ttl = 60, + } + }) + + local lrucache = require("resty.lrucache") + local old_lrucache_new = lrucache.new + lrucache.new = function(...) + local cache = old_lrucache_new(...) + + local old_set = cache.set + cache.set = function(self, key, value, ttl) + assert.equal("example.com", key) + assert.are.same({ "192.168.1.1", "1.2.3.4" }, value) + assert.equal(60, ttl) + return old_set(self, key, value, ttl) + end + + return cache + end + + assert.are.same({ "192.168.1.1", "1.2.3.4" }, dns.resolve("example.com")) + + dns_helper.mock_new(function(...) 
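The mocked constructor here only fires if the cache is bypassed: `resolve()` caches all A records under the smallest TTL in the answer set and serves repeat lookups from that cache. A standalone sketch of that TTL selection (plain Lua; `addresses_and_min_ttl` is an illustrative re-implementation, not the module's private function):

```lua
-- Illustrative only: mirrors the TTL selection in util/dns.lua.
local function addresses_and_min_ttl(answers)
  local addresses, ttl = {}, math.huge
  for _, ans in ipairs(answers) do
    if ans.address then                      -- skip CNAME-only answers
      addresses[#addresses + 1] = ans.address
      if ans.ttl < ttl then ttl = ans.ttl end
    end
  end
  return addresses, ttl
end

local addresses, ttl = addresses_and_min_ttl({
  { name = "example.com", address = "192.168.1.1", ttl = 3600 },
  { name = "example.com", address = "1.2.3.4", ttl = 60 },
})
assert(#addresses == 2)
assert(ttl == 60)  -- the cache entry expires with the shortest-lived record
```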
+ error("expected to short-circuit and return response from cache") + end) + assert.are.same({ "192.168.1.1", "1.2.3.4" }, dns.resolve("example.com")) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/util_test.lua b/rootfs/etc/nginx/lua/test/util_test.lua new file mode 100644 index 0000000000..682c859192 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/util_test.lua @@ -0,0 +1,35 @@ +local original_ngx = ngx +local function reset_ngx() + _G.ngx = original_ngx +end + +local function mock_ngx(mock) + local _ngx = mock + setmetatable(_ngx, { __index = ngx }) + _G.ngx = _ngx +end + +describe("lua_ngx_var", function() + local util = require("util") + + before_each(function() + mock_ngx({ var = { remote_addr = "192.168.1.1", [1] = "nginx/regexp/1/group/capturing" } }) + end) + + after_each(function() + reset_ngx() + package.loaded["monitor"] = nil + end) + + it("returns value of nginx var by key", function() + assert.equal("192.168.1.1", util.lua_ngx_var("$remote_addr")) + end) + + it("returns value of nginx var when key is number", function() + assert.equal("nginx/regexp/1/group/capturing", util.lua_ngx_var("$1")) + end) + + it("returns nil when variable is not defined", function() + assert.equal(nil, util.lua_ngx_var("$foo_bar")) + end) +end) diff --git a/rootfs/etc/nginx/lua/util.lua b/rootfs/etc/nginx/lua/util.lua new file mode 100644 index 0000000000..50e1a7e5ee --- /dev/null +++ b/rootfs/etc/nginx/lua/util.lua @@ -0,0 +1,90 @@ +local string_len = string.len +local string_sub = string.sub + +local _M = {} + +function _M.get_nodes(endpoints) + local nodes = {} + local weight = 1 + + for _, endpoint in pairs(endpoints) do + local endpoint_string = endpoint.address .. ":" .. endpoint.port + nodes[endpoint_string] = weight + end + + return nodes +end + +-- given an Nginx variable i.e $request_uri +-- it returns value of ngx.var[request_uri] +function _M.lua_ngx_var(ngx_var) + local var_name = string_sub(ngx_var, 2) + if var_name:match("^%d+$") then + var_name = tonumber(var_name) + end + + return ngx.var[var_name] +end + +-- this implementation is taken from +-- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3 +-- and modified for use in this project +local function deep_compare(t1, t2, ignore_mt) + local ty1 = type(t1) + local ty2 = type(t2) + if ty1 ~= ty2 then return false end + -- non-table types can be directly compared + if ty1 ~= 'table' and ty2 ~= 'table' then return t1 == t2 end + -- as well as tables which have the metamethod __eq + local mt = getmetatable(t1) + if not ignore_mt and mt and mt.__eq then return t1 == t2 end + for k1,v1 in pairs(t1) do + local v2 = t2[k1] + if v2 == nil or not deep_compare(v1,v2) then return false end + end + for k2,v2 in pairs(t2) do + local v1 = t1[k2] + if v1 == nil or not deep_compare(v1,v2) then return false end + end + return true +end +_M.deep_compare = deep_compare + +function _M.is_blank(str) + return str == nil or string_len(str) == 0 +end + +-- this implementation is taken from: +-- https://github.com/luafun/luafun/blob/master/fun.lua#L33 +-- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0 +local function deepcopy(orig) + local orig_type = type(orig) + local copy + if orig_type == 'table' then + copy = {} + for orig_key, orig_value in next, orig, nil do + copy[deepcopy(orig_key)] = deepcopy(orig_value) + end + else + copy = orig + end + return copy +end +_M.deepcopy = deepcopy + +local function tablelength(T) + local count = 0 + for _ in pairs(T) do + count = count + 1 + end 
+  return count
+end
+_M.tablelength = tablelength
+
+-- replaces special character value a with value b for all occurrences in a string
+local function replace_special_char(str, a, b)
+  return string.gsub(str, "%" .. a, b)
+end
+_M.replace_special_char = replace_special_char
+
+return _M
diff --git a/rootfs/etc/nginx/lua/util/dns.lua b/rootfs/etc/nginx/lua/util/dns.lua
new file mode 100644
index 0000000000..fcebeae0bf
--- /dev/null
+++ b/rootfs/etc/nginx/lua/util/dns.lua
@@ -0,0 +1,98 @@
+local resolver = require("resty.dns.resolver")
+local lrucache = require("resty.lrucache")
+local configuration = require("configuration")
+local util = require("util")
+
+local _M = {}
+local CACHE_SIZE = 10000
+local MAXIMUM_TTL_VALUE = 2147483647 -- maximum value according to https://tools.ietf.org/html/rfc2181
+
+local cache, err = lrucache.new(CACHE_SIZE)
+if not cache then
+  return error("failed to create the cache: " .. (err or "unknown"))
+end
+
+local function a_records_and_min_ttl(answers)
+  local addresses = {}
+  local ttl = MAXIMUM_TTL_VALUE -- will be lowered to the smallest TTL among the answers
+
+  for _, ans in ipairs(answers) do
+    if ans.address then
+      table.insert(addresses, ans.address)
+      if ttl > ans.ttl then
+        ttl = ans.ttl
+      end
+    end
+  end
+
+  return addresses, ttl
+end
+
+local function resolve_host(host, r, qtype)
+  local answers
+  answers, err = r:query(host, { qtype = qtype }, {})
+  if not answers then
+    return nil, -1, tostring(err)
+  end
+
+  if answers.errcode then
+    return nil, -1, string.format("server returned error code: %s: %s", answers.errcode, answers.errstr)
+  end
+
+  local addresses, ttl = a_records_and_min_ttl(answers)
+  if #addresses == 0 then
+    return nil, -1, "no record resolved"
+  end
+
+  return addresses, ttl, nil
+end
+
+function _M.resolve(host)
+  local cached_addresses = cache:get(host)
+  if cached_addresses then
+    local message = string.format(
+      "addresses %s for host %s were resolved from cache",
+      table.concat(cached_addresses, ", "), host)
+    ngx.log(ngx.INFO, message)
+    return cached_addresses
+  end
+
+  local r
+  r, err = resolver:new{
+    nameservers = util.deepcopy(configuration.nameservers),
+    retrans = 5,
+    timeout = 2000,  -- 2 sec
+  }
+
+  if not r then
+    ngx.log(ngx.ERR, "failed to instantiate the resolver: " .. tostring(err))
+    return { host }
+  end
+
+  local dns_errors = {}
+
+  local addresses, ttl
+  addresses, ttl, err = resolve_host(host, r, r.TYPE_A)
+  if not addresses then
+    table.insert(dns_errors, tostring(err))
+  elseif #addresses > 0 then
+    cache:set(host, addresses, ttl)
+    return addresses
+  end
+
+  addresses, ttl, err = resolve_host(host, r, r.TYPE_AAAA)
+  if not addresses then
+    table.insert(dns_errors, tostring(err))
+  elseif #addresses > 0 then
+    cache:set(host, addresses, ttl)
+    return addresses
+  end
+
+  if #dns_errors > 0 then
+    ngx.log(ngx.ERR, "failed to query the DNS server:\n" .. table.concat(dns_errors, "\n"))
+  end
+
+  return { host }
+end
+
+return _M
diff --git a/rootfs/etc/nginx/lua/util/split.lua b/rootfs/etc/nginx/lua/util/split.lua
new file mode 100644
index 0000000000..620e083a51
--- /dev/null
+++ b/rootfs/etc/nginx/lua/util/split.lua
@@ -0,0 +1,59 @@
+local _M = {}
+
+-- splits an address string into host and port
+local function parse_addr(addr)
+  local _, _, host, port = addr:find("([^:]+):([^:]+)")
+  if host and port then
+    return {host=host, port=port}
+  else
+    return nil, "error in parsing upstream address!"
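A standalone usage sketch for this module's parsers (assumes the file is loadable as `util.split`; the sample strings mimic the `$upstream_addr` and `$upstream_response_time` formats NGINX emits):

```lua
-- Usage sketch for util/split.lua (values mimic NGINX's $upstream_addr format).
local split = require("util.split")

-- one entry per upstream hop; the bare ":" group separators are dropped
local addrs = split.split_upstream_var("10.0.0.1:8080 : 10.0.0.2:8080")
assert(#addrs == 2 and addrs[2] == "10.0.0.2:8080")

local host_ports, err = split.split_upstream_addr("10.0.0.1:8080, 10.0.0.2:8080")
assert(err == nil)
assert(host_ports[1].host == "10.0.0.1" and host_ports[1].port == "8080")

-- convenient for vars like $upstream_response_time ("0.01 : 0.02")
assert(split.get_first_value("0.01 : 0.02") == "0.01")
```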
+ end +end + +function _M.get_first_value(var) + local t = _M.split_upstream_var(var) or {} + if #t == 0 then return nil end + return t[1] +end + +-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example +-- CAVEAT: nginx is giving out : instead of , so the docs are wrong +-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr +-- 200 : 200 , ngx.var.upstream_status +-- 0.00 : 0.00, ngx.var.upstream_response_time +function _M.split_upstream_var(var) + if not var then + return nil, nil + end + local t = {} + for v in var:gmatch("[^%s|,]+") do + if v ~= ":" then + t[#t+1] = v + end + end + return t +end + +-- Splits an NGINX $upstream_addr and returns an array of tables with a `host` and `port` key-value pair. +function _M.split_upstream_addr(addrs_str) + if not addrs_str then + return nil, nil + end + + local addrs = _M.split_upstream_var(addrs_str) + local host_and_ports = {} + + for _, v in ipairs(addrs) do + local a, err = parse_addr(v) + if err then + return nil, err + end + host_and_ports[#host_and_ports+1] = a + end + if #host_and_ports == 0 then + return nil, "no upstream addresses to parse!" + end + return host_and_ports +end + +return _M diff --git a/rootfs/etc/nginx/nginx.conf b/rootfs/etc/nginx/nginx.conf index bb36624ce5..6f8e86b90e 100644 --- a/rootfs/etc/nginx/nginx.conf +++ b/rootfs/etc/nginx/nginx.conf @@ -1,5 +1,5 @@ # A very simple nginx configuration file that forces nginx to start. -pid /run/nginx.pid; +pid /tmp/nginx.pid; events {} http {} diff --git a/rootfs/etc/nginx/opentracing.json b/rootfs/etc/nginx/opentracing.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/rootfs/etc/nginx/opentracing.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl old mode 100644 new mode 100755 index 22cb69969e..f012957af1 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ b/rootfs/etc/nginx/template/nginx.tmpl @@ -7,7 +7,16 @@ {{ $proxyHeaders := .ProxySetHeaders }} {{ $addHeaders := .AddHeaders }} -{{ if $cfg.EnableModsecurity }} +# Configuration checksum: {{ $all.Cfg.Checksum }} + +# setup custom paths that do not require root access +pid {{ .PID }}; + +{{ if $cfg.UseGeoIP2 }} +load_module /etc/nginx/modules/ngx_http_geoip2_module.so; +{{ end }} + +{{ if (shouldLoadModSecurityModule $cfg $servers) }} load_module /etc/nginx/modules/ngx_http_modsecurity_module.so; {{ end }} @@ -15,30 +24,109 @@ load_module /etc/nginx/modules/ngx_http_modsecurity_module.so; load_module /etc/nginx/modules/ngx_http_opentracing_module.so; {{ end }} -{{ if (and $cfg.EnableOpentracing (ne $cfg.ZipkinCollectorHost "")) }} -load_module /etc/nginx/modules/ngx_http_zipkin_module.so; -{{ end }} - daemon off; worker_processes {{ $cfg.WorkerProcesses }}; -pid /run/nginx.pid; -{{ if ne .MaxOpenFiles 0 }} -worker_rlimit_nofile {{ .MaxOpenFiles }}; -{{ end}} +{{ if gt (len $cfg.WorkerCPUAffinity) 0 }} +worker_cpu_affinity {{ $cfg.WorkerCPUAffinity }}; +{{ end }} + +worker_rlimit_nofile {{ $cfg.MaxWorkerOpenFiles }}; {{/* http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout */}} {{/* avoid waiting too long during a reload */}} worker_shutdown_timeout {{ $cfg.WorkerShutdownTimeout }} ; +{{ if not (empty $cfg.MainSnippet) }} +{{ $cfg.MainSnippet }} +{{ end }} + events { - multi_accept on; + multi_accept {{ if $cfg.EnableMultiAccept }}on{{ else }}off{{ end }}; worker_connections {{ $cfg.MaxWorkerConnections }}; use epoll; } http { - {{/* we use the value of the header 
X-Forwarded-For to be able to use the geo_ip module */}} + lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/lua-platform-path/lua/5.1/?.so;;"; + lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;"; + + {{ buildLuaSharedDictionaries $servers $all.Cfg.DisableLuaRestyWAF }} + + init_by_lua_block { + require("resty.core") + collectgarbage("collect") + + local lua_resty_waf = require("resty.waf") + lua_resty_waf.init() + + -- init modules + local ok, res + + ok, res = pcall(require, "lua_ingress") + if not ok then + error("require failed: " .. tostring(res)) + else + lua_ingress = res + lua_ingress.set_config({{ configForLua $all }}) + end + + ok, res = pcall(require, "configuration") + if not ok then + error("require failed: " .. tostring(res)) + else + configuration = res + configuration.nameservers = { {{ buildResolversForLua $cfg.Resolver $cfg.DisableIpv6DNS }} } + end + + ok, res = pcall(require, "balancer") + if not ok then + error("require failed: " .. tostring(res)) + else + balancer = res + end + + {{ if $all.EnableMetrics }} + ok, res = pcall(require, "monitor") + if not ok then + error("require failed: " .. tostring(res)) + else + monitor = res + end + {{ end }} + + {{ if $all.DynamicCertificatesEnabled }} + ok, res = pcall(require, "certificate") + if not ok then + error("require failed: " .. tostring(res)) + else + certificate = res + end + {{ end }} + + ok, res = pcall(require, "plugins") + if not ok then + error("require failed: " .. tostring(res)) + else + plugins = res + end + -- load all plugins that'll be used here + plugins.init({}) + } + + init_worker_by_lua_block { + lua_ingress.init_worker() + balancer.init_worker() + {{ if $all.EnableMetrics }} + monitor.init_worker() + {{ end }} + + plugins.run() + } + + {{/* Enable the real_ip module only if we use either X-Forwarded headers or Proxy Protocol. 
*/}} + {{/* we use the value of the real IP for the geo_ip module */}} + {{ if or $cfg.UseForwardedHeaders $cfg.UseProxyProtocol }} {{ if $cfg.UseProxyProtocol }} real_ip_header proxy_protocol; {{ else }} @@ -49,20 +137,48 @@ http { {{ range $trusted_ip := $cfg.ProxyRealIPCIDR }} set_real_ip_from {{ $trusted_ip }}; {{ end }} + {{ end }} + + {{ if $all.Cfg.EnableModsecurity }} + modsecurity on; + + modsecurity_rules_file /etc/nginx/modsecurity/modsecurity.conf; + + {{ if $all.Cfg.EnableOWASPCoreRules }} + modsecurity_rules_file /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf; + {{ end }} + {{ end }} + + {{ if $cfg.UseGeoIP }} {{/* databases used to determine the country depending on the client IP address */}} {{/* http://nginx.org/en/docs/http/ngx_http_geoip_module.html */}} {{/* this is require to calculate traffic for individual country using GeoIP in the status page */}} - geoip_country /etc/nginx/GeoIP.dat; - geoip_city /etc/nginx/GeoLiteCity.dat; + geoip_country /etc/nginx/geoip/GeoIP.dat; + geoip_city /etc/nginx/geoip/GeoLiteCity.dat; + geoip_org /etc/nginx/geoip/GeoIPASNum.dat; geoip_proxy_recursive on; - - {{ if $cfg.EnableVtsStatus }} - vhost_traffic_status_zone shared:vhost_traffic_status:{{ $cfg.VtsStatusZoneSize }}; - vhost_traffic_status_filter_by_set_key {{ $cfg.VtsDefaultFilterKey }}; {{ end }} - sendfile on; + {{ if $cfg.UseGeoIP2 }} + # https://github.com/leev/ngx_http_geoip2_module#example-usage + + geoip2 /etc/nginx/geoip/GeoLite2-City.mmdb { + $geoip2_city_country_code source=$the_real_ip country iso_code; + $geoip2_city_country_name source=$the_real_ip country names en; + $geoip2_city source=$the_real_ip city names en; + $geoip2_postal_code source=$the_real_ip postal code; + $geoip2_dma_code source=$the_real_ip location metro_code; + $geoip2_latitude source=$the_real_ip location latitude; + $geoip2_longitude source=$the_real_ip location longitude; + $geoip2_region_code source=$the_real_ip subdivisions 0 iso_code; + $geoip2_region_name source=$the_real_ip subdivisions 0 names en; + } + + geoip2 /etc/nginx/geoip/GeoLite2-ASN.mmdb { + $geoip2_asn source=$the_real_ip autonomous_system_number; + } + {{ end }} aio threads; aio_write on; @@ -77,6 +193,11 @@ http { keepalive_timeout {{ $cfg.KeepAlive }}s; keepalive_requests {{ $cfg.KeepAliveRequests }}; + client_body_temp_path /tmp/client-body; + fastcgi_temp_path /tmp/fastcgi-temp; + proxy_temp_path /tmp/proxy-temp; + ajp_temp_path /tmp/ajp-temp; + client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; @@ -85,6 +206,7 @@ http { http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }}; http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }}; + http2_max_requests {{ $cfg.HTTP2MaxRequests }}; types_hash_max_size 2048; server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }}; @@ -100,19 +222,18 @@ http { underscores_in_headers {{ if $cfg.EnableUnderscoresInHeaders }}on{{ else }}off{{ end }}; ignore_invalid_headers {{ if $cfg.IgnoreInvalidHeaders }}on{{ else }}off{{ end }}; + limit_req_status {{ $cfg.LimitReqStatusCode }}; + limit_conn_status {{ $cfg.LimitConnStatusCode }}; + {{ if $cfg.EnableOpentracing }} opentracing on; {{ end }} - {{ if (and $cfg.EnableOpentracing (ne $cfg.ZipkinCollectorHost "")) }} - zipkin_collector_host {{ $cfg.ZipkinCollectorHost }}; - zipkin_collector_port {{ $cfg.ZipkinCollectorPort }}; - zipkin_service_name {{ $cfg.ZipkinServiceName }}; - {{ end }} + {{ buildOpentracing 
$cfg }} include /etc/nginx/mime.types; default_type text/html; - + {{ if $cfg.EnableBrotli }} brotli on; brotli_comp_level {{ $cfg.BrotliLevel }}; @@ -121,7 +242,7 @@ http { {{ if $cfg.UseGzip }} gzip on; - gzip_comp_level 5; + gzip_comp_level {{ $cfg.GzipLevel }}; gzip_http_version 1.1; gzip_min_length 256; gzip_types {{ $cfg.GzipTypes }}; @@ -135,6 +256,9 @@ http { {{ end }} server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }}; + {{ if not $cfg.ShowServerTokens }} + more_clear_headers Server; + {{ end }} # disable warnings uninitialized_variable_warn off; @@ -143,6 +267,7 @@ http { # $namespace # $ingress_name # $service_name + # $service_port log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}'; {{/* map urls that should not appear in access.log */}} @@ -156,25 +281,35 @@ http { {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log {{ $cfg.AccessLogPath }} upstreaminfo if=$loggable; + {{ if $cfg.EnableSyslog }} + access_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} upstreaminfo if=$loggable; + {{ else }} + access_log {{ $cfg.AccessLogPath }} upstreaminfo {{ $cfg.AccessLogParams }} if=$loggable; + {{ end }} {{ end }} + + {{ if $cfg.EnableSyslog }} + error_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} {{ $cfg.ErrorLogLevel }}; + {{ else }} error_log {{ $cfg.ErrorLogPath }} {{ $cfg.ErrorLogLevel }}; + {{ end }} - {{ buildResolvers $cfg.Resolver }} + {{ buildResolvers $cfg.Resolver $cfg.DisableIpv6DNS }} - {{/* Whenever nginx proxies a request without a "Connection" header, the "Connection" header is set to "close" */}} - {{/* when making the target request. This means that you cannot simply use */}} - {{/* "proxy_set_header Connection $http_connection" for WebSocket support because in this case, the */}} - {{/* "Connection" header would be set to "" whenever the original request did not have a "Connection" header, */}} - {{/* which would mean no "Connection" header would be in the target request. Since this would deviate from */}} - {{/* normal nginx behavior we have to use this approach. */}} - # Retain the default nginx handling of requests without a "Connection" header + # See https://www.nginx.com/blog/websocket-nginx map $http_upgrade $connection_upgrade { default upgrade; + {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} + # See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive + '' ''; + {{ else }} '' close; + {{ end }} } - map {{ buildForwardedFor $cfg.ForwardedForHeader }} $the_real_ip { + # The following is a sneaky way to do "set $the_real_ip $remote_addr" + # Needed because using set is not allowed outside server blocks. 
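Variables defined here in the template, such as `$the_real_ip`, are consumed from the Lua side through `ngx.var`, which is what `util.lua_ngx_var` wraps. A minimal sketch of that access path (plain Lua with a stubbed `ngx` table and made-up values; at runtime `ngx` is OpenResty's real global):

```lua
-- Sketch: how Lua code resolves template-defined variables (stubbed ngx, made-up values).
ngx = { var = { the_real_ip = "203.0.113.7", [1] = "some/captured/group" } }

local util = require("util")  -- assumes rootfs/etc/nginx/lua is on package.path

assert(util.lua_ngx_var("$the_real_ip") == "203.0.113.7") -- named variable
assert(util.lua_ngx_var("$1") == "some/captured/group")   -- numeric regex capture
```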
+ map '' $the_real_ip { {{ if $cfg.UseProxyProtocol }} # Get IP address from Proxy Protocol default $proxy_protocol_addr; @@ -183,42 +318,16 @@ http { {{ end }} } - # trust http_x_forwarded_proto headers correctly indicate ssl offloading - map $http_x_forwarded_proto $pass_access_scheme { - default $http_x_forwarded_proto; - '' $scheme; - } - - map $http_x_forwarded_port $pass_server_port { - default $http_x_forwarded_port; - '' $server_port; - } - - map $http_x_forwarded_host $best_http_host { - default $http_x_forwarded_host; - '' $this_host; - } - - {{ if $all.IsSSLPassthroughEnabled }} - # map port {{ $all.ListenPorts.SSLProxy }} to 443 for header X-Forwarded-Port - map $pass_server_port $pass_port { - {{ $all.ListenPorts.SSLProxy }} 443; - default $pass_server_port; - } - {{ else }} - map $pass_server_port $pass_port { - 443 443; - default $pass_server_port; - } - {{ end }} - - # Obtain best http host - map $http_host $this_host { - default $http_host; - '' $host; + # Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server. + # If no such header is provided, it can provide a random value. + map $http_x_request_id $req_id { + default $http_x_request_id; + {{ if $cfg.GenerateRequestID }} + "" $request_id; + {{ end }} } - {{ if $cfg.ComputeFullForwardedFor }} + {{ if and $cfg.UseForwardedHeaders $cfg.ComputeFullForwardedFor }} # We can't use $proxy_add_x_forwarded_for because the realip module # replaces the remote_addr too soon map $http_x_forwarded_for $full_x_forwarded_for { @@ -232,6 +341,12 @@ http { } {{ end }} + # Create a variable that contains the literal $ character. + # This works because the geo module will not resolve variables. + geo $literal_dollar { + default "$"; + } + server_name_in_redirect off; port_in_redirect off; @@ -270,13 +385,12 @@ http { ssl_ecdh_curve {{ $cfg.SSLECDHCurve }}; - {{ if .CustomErrors }} - # Custom error pages + {{ if gt (len $cfg.CustomHTTPErrors) 0 }} proxy_intercept_errors on; {{ end }} {{ range $errCode := $cfg.CustomHTTPErrors }} - error_page {{ $errCode }} = @custom_{{ $errCode }};{{ end }} + error_page {{ $errCode }} = @custom_upstream-default-backend_{{ $errCode }};{{ end }} proxy_ssl_session_reuse on; @@ -284,68 +398,29 @@ http { proxy_pass_header Server; {{ end }} + {{ range $header := $cfg.HideHeaders }}proxy_hide_header {{ $header }}; + {{ end }} + {{ if not (empty $cfg.HTTPSnippet) }} # Custom code snippet configured in the configuration configmap {{ $cfg.HTTPSnippet }} {{ end }} - {{ range $name, $upstream := $backends }} - {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} - upstream sticky-{{ $upstream.Name }} { - sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }} httponly; - - {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} - keepalive {{ $cfg.UpstreamKeepaliveConnections }}; - {{ end }} - - {{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }}; - {{ end }} + upstream upstream_balancer { + server 0.0.0.1; # placeholder - } - - {{ end }} - - - upstream {{ $upstream.Name }} { - # Load balance algorithm; empty for round robin, which is the default - {{ if ne $cfg.LoadBalanceAlgorithm "round_robin" }} - {{ $cfg.LoadBalanceAlgorithm }}; - {{ end }} - - {{ if $upstream.UpstreamHashBy }} - hash {{ $upstream.UpstreamHashBy }} consistent; - {{ end }} + balancer_by_lua_block { + 
balancer.balance() + } {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} keepalive {{ $cfg.UpstreamKeepaliveConnections }}; - {{ end }} - {{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }}; + keepalive_timeout {{ $cfg.UpstreamKeepaliveTimeout }}s; + keepalive_requests {{ $cfg.UpstreamKeepaliveRequests }}; {{ end }} } - {{ end }} - - {{/* build the maps that will be use to validate the Whitelist */}} - {{ range $index, $server := $servers }} - {{ range $location := $server.Locations }} - {{ $path := buildLocation $location }} - - {{ if isLocationAllowed $location }} - {{ if gt (len $location.Whitelist.CIDR) 0 }} - - # Deny for {{ print $server.Hostname $path }} - geo $the_real_ip {{ buildDenyVariable (print $server.Hostname "_" $path) }} { - default 1; - - {{ range $ip := $location.Whitelist.CIDR }} - {{ $ip }} 0;{{ end }} - } - {{ end }} - {{ end }} - {{ end }} - {{ end }} - {{ range $rl := (filterRateLimits $servers ) }} # Ratelimit {{ $rl.Name }} geo $the_real_ip $whitelist_{{ $rl.ID }} { @@ -367,8 +442,31 @@ http { {{ $zone }} {{ end }} + # Global filters + {{ range $ip := $cfg.BlockCIDRs }}deny {{ trimSpace $ip }}; + {{ end }} + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + map $http_user_agent $block_ua { + default 0; + + {{ range $ua := $cfg.BlockUserAgents }}{{ trimSpace $ua }} 1; + {{ end }} + } + {{ end }} + + {{ if gt (len $cfg.BlockReferers) 0 }} + map $http_referer $block_ref { + default 0; + + {{ range $ref := $cfg.BlockReferers }}{{ trimSpace $ref }} 1; + {{ end }} + } + {{ end }} + {{/* Build server redirects (from/to www) */}} - {{ range $hostname, $to := .RedirectServers }} + {{ range $redirect := .RedirectServers }} + ## start server {{ $redirect.From }} server { {{ range $address := $all.Cfg.BindAddressIpv4 }} listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}; @@ -386,22 +484,64 @@ http { listen [::]:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}; {{ end }} {{ end }} - server_name {{ $hostname }}; + server_name {{ $redirect.From }}; + + {{ if not (empty $redirect.SSLCert.PemFileName) }} + {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}} + # PEM sha: {{ $redirect.SSLCert.PemSHA }} + ssl_certificate {{ $redirect.SSLCert.PemFileName }}; + ssl_certificate_key {{ $redirect.SSLCert.PemFileName }}; + {{ if not (empty $redirect.SSLCert.FullChainPemFileName)}} + ssl_trusted_certificate {{ $redirect.SSLCert.FullChainPemFileName }}; + ssl_stapling on; + ssl_stapling_verify on; + {{ end }} + + {{ if $all.DynamicCertificatesEnabled}} + ssl_certificate_by_lua_block { + certificate.call() + } + {{ end }} + {{ end }} + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + if ($block_ua) { + return 403; + } + {{ end }} + {{ if gt (len $cfg.BlockReferers) 0 }} + if ($block_ref) { + return 403; + } + {{ end }} {{ if ne $all.ListenPorts.HTTPS 443 }} {{ $redirect_port := (printf ":%v" $all.ListenPorts.HTTPS) }} - return {{ $all.Cfg.HTTPRedirectCode }} $scheme://{{ $to }}{{ $redirect_port }}$request_uri; + return {{ $all.Cfg.HTTPRedirectCode }} $scheme://{{ $redirect.To }}{{ $redirect_port }}$request_uri; {{ else }} - return {{ $all.Cfg.HTTPRedirectCode }} $scheme://{{ $to }}$request_uri; + return {{ $all.Cfg.HTTPRedirectCode }} 
$scheme://{{ $redirect.To }}$request_uri; {{ end }} } + ## end server {{ $redirect.From }} {{ end }} - {{ range $index, $server := $servers }} + {{ range $server := $servers }} ## start server {{ $server.Hostname }} server { server_name {{ $server.Hostname }} {{ $server.Alias }}; + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + if ($block_ua) { + return 403; + } + {{ end }} + {{ if gt (len $cfg.BlockReferers) 0 }} + if ($block_ref) { + return 403; + } + {{ end }} + {{ template "SERVER" serverConfig $all $server }} {{ if not (empty $cfg.ServerSnippet) }} @@ -409,69 +549,155 @@ http { {{ $cfg.ServerSnippet }} {{ end }} - {{ template "CUSTOM_ERRORS" $all }} + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps "upstream-default-backend" $cfg.CustomHTTPErrors $all.EnableMetrics) }} } ## end server {{ $server.Hostname }} {{ end }} + # backend for when default-backend-service is not configured or it does not have endpoints + server { + listen {{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}; + {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }};{{ end }} + set $proxy_upstream_name "internal"; + + access_log off; + + location / { + return 404; + } + } + # default server, used for NGINX healthcheck and access to nginx stats server { - # Use the port {{ $all.ListenPorts.Status }} (random value just to avoid known ports) as default port for nginx. - # Changing this value requires a change in: - # https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/pkg/cmd/controller/nginx.go - listen {{ $all.ListenPorts.Status }} default_server reuseport backlog={{ $all.BacklogSize }}; - {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Status }} default_server reuseport backlog={{ $all.BacklogSize }};{{ end }} - set $proxy_upstream_name "-"; + listen unix:{{ .StatusSocket }}; + set $proxy_upstream_name "internal"; + + keepalive_timeout 0; + gzip off; + + access_log off; + + {{ if $cfg.EnableOpentracing }} + opentracing off; + {{ end }} location {{ $healthzURI }} { - access_log off; return 200; } - location /nginx_status { - set $proxy_upstream_name "internal"; + location /is-dynamic-lb-initialized { + content_by_lua_block { + local configuration = require("configuration") + local backend_data = configuration.get_backends_data() + if not backend_data then + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + ngx.say("OK") + ngx.exit(ngx.HTTP_OK) + } + } - {{ if $cfg.EnableVtsStatus }} - vhost_traffic_status_display; - vhost_traffic_status_display_format html; - {{ else }} - access_log off; + location {{ .StatusPath }} { stub_status on; - {{ end }} } - location / { - {{ if .CustomErrors }} - proxy_set_header X-Code 404; - {{ end }} - set $proxy_upstream_name "upstream-default-backend"; - proxy_pass http://upstream-default-backend; + location /configuration { + # this should be equals to configuration_data dict + client_max_body_size 10m; + client_body_buffer_size 10m; + proxy_buffering off; + + content_by_lua_block { + configuration.call() + } } - {{ template "CUSTOM_ERRORS" $all }} + location / { + content_by_lua_block { + ngx.exit(ngx.HTTP_NOT_FOUND) + } + } } } stream { + lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/lua-platform-path/lua/5.1/?.so;;"; + lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;"; + + lua_shared_dict tcp_udp_configuration_data 
5M; + + init_by_lua_block { + require("resty.core") + collectgarbage("collect") + + -- init modules + local ok, res + + ok, res = pcall(require, "configuration") + if not ok then + error("require failed: " .. tostring(res)) + else + configuration = res + configuration.nameservers = { {{ buildResolversForLua $cfg.Resolver $cfg.DisableIpv6DNS }} } + end + + ok, res = pcall(require, "tcp_udp_configuration") + if not ok then + error("require failed: " .. tostring(res)) + else + tcp_udp_configuration = res + end + + ok, res = pcall(require, "tcp_udp_balancer") + if not ok then + error("require failed: " .. tostring(res)) + else + tcp_udp_balancer = res + end + } + + init_worker_by_lua_block { + tcp_udp_balancer.init_worker() + } + + lua_add_variable $proxy_upstream_name; + log_format log_stream {{ $cfg.LogFormatStream }}; {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log {{ $cfg.AccessLogPath }} log_stream; + access_log {{ $cfg.AccessLogPath }} log_stream {{ $cfg.AccessLogParams }}; {{ end }} error_log {{ $cfg.ErrorLogPath }}; - # TCP services - {{ range $i, $tcpServer := .TCPBackends }} - upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { - {{ range $j, $endpoint := $tcpServer.Endpoints }} - server {{ $endpoint.Address }}:{{ $endpoint.Port }}; - {{ end }} + upstream upstream_balancer { + server 0.0.0.1:1234; # placeholder + + balancer_by_lua_block { + tcp_udp_balancer.balance() + } + } + + server { + listen unix:{{ .StreamSocket }}; + + content_by_lua_block { + tcp_udp_configuration.call() + } } + + # TCP services + {{ range $tcpServer := .TCPBackends }} server { + preread_by_lua_block { + ngx.var.proxy_upstream_name="tcp-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}"; + } + {{ range $address := $all.Cfg.BindAddressIpv4 }} listen {{ $address }}:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.ProxyProtocol.Decode }} proxy_protocol{{ end }}; {{ else }} @@ -485,23 +711,20 @@ stream { {{ end }} {{ end }} proxy_timeout {{ $cfg.ProxyStreamTimeout }}; - proxy_pass tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}; + proxy_pass upstream_balancer; {{ if $tcpServer.Backend.ProxyProtocol.Encode }} proxy_protocol on; {{ end }} } - {{ end }} # UDP services - {{ range $i, $udpServer := .UDPBackends }} - upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} { - {{ range $j, $endpoint := $udpServer.Endpoints }} - server {{ $endpoint.Address }}:{{ $endpoint.Port }}; - {{ end }} - } - + {{ range $udpServer := .UDPBackends }} server { + preread_by_lua_block { + ngx.var.proxy_upstream_name="udp-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}"; + } + {{ range $address := $all.Cfg.BindAddressIpv4 }} listen {{ $address }}:{{ $udpServer.Port }} udp; {{ else }} @@ -516,17 +739,17 @@ stream { {{ end }} proxy_responses {{ $cfg.ProxyStreamResponses }}; proxy_timeout {{ $cfg.ProxyStreamTimeout }}; - proxy_pass udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}; + proxy_pass upstream_balancer; } - {{ end }} } {{/* definition of templates to avoid repetitions */}} {{ define "CUSTOM_ERRORS" }} - {{ $proxySetHeaders := .ProxySetHeaders }} - {{ range $errCode := .Cfg.CustomHTTPErrors }} - location @custom_{{ $errCode }} { 
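Each generated error location now pins `$proxy_upstream_name` via `buildCustomErrorDeps` and, when metrics are enabled, reports itself through `monitor.call()` in the log phase. A busted-style sketch of that hook, reusing the helpers from `monitor_test.lua` above (illustrative values only, not the controller's real environment):

```lua
-- Busted-style sketch; mock_ngx and mock_ngx_socket_tcp come from monitor_test.lua.
_G._TEST = true
local tcp_mock = mock_ngx_socket_tcp()     -- stub the unix socket like the tests do
local monitor = require("monitor")

mock_ngx({ var = {
  host = "example.com", namespace = "default", ingress_name = "example",
  service_name = "http-svc", location_path = "/", request_method = "GET",
  status = "503", request_length = "256", request_time = "0.04", bytes_sent = "512",
} })

monitor.call()                             -- log phase: append one metric record
assert.equal(1, #monitor.get_metrics_batch())

monitor.flush()                            -- JSON-encode the batch and send it to
                                           -- unix:/tmp/prometheus-nginx.socket
assert.stub(tcp_mock.send).was_called()
```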
+ {{ $enableMetrics := .EnableMetrics }} + {{ $upstreamName := .UpstreamName }} + {{ range $errCode := .ErrorCodes }} + location @custom_{{ $upstreamName }}_{{ $errCode }} { internal; proxy_intercept_errors off; @@ -537,9 +760,20 @@ stream { proxy_set_header X-Namespace $namespace; proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; + proxy_set_header X-Request-ID $req_id; + proxy_set_header Host $best_http_host; + + set $proxy_upstream_name {{ $upstreamName }}; rewrite (.*) / break; - proxy_pass http://upstream-default-backend; + + proxy_pass http://upstream_balancer; + log_by_lua_block { + {{ if $enableMetrics }} + monitor.call() + {{ end }} + } } {{ end }} {{ end }} @@ -549,20 +783,20 @@ stream { {{ $cors := .CorsConfig }} # Cors Preflight methods needs additional options and different Return Code if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '{{ $cors.CorsAllowOrigin }}' always; - {{ if $cors.CorsAllowCredentials }} add_header 'Access-Control-Allow-Credentials' '{{ $cors.CorsAllowCredentials }}' always; {{ end }} - add_header 'Access-Control-Allow-Methods' '{{ $cors.CorsAllowMethods }}' always; - add_header 'Access-Control-Allow-Headers' '{{ $cors.CorsAllowHeaders }}' always; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; + more_set_headers 'Access-Control-Allow-Origin: {{ $cors.CorsAllowOrigin }}'; + {{ if $cors.CorsAllowCredentials }} more_set_headers 'Access-Control-Allow-Credentials: {{ $cors.CorsAllowCredentials }}'; {{ end }} + more_set_headers 'Access-Control-Allow-Methods: {{ $cors.CorsAllowMethods }}'; + more_set_headers 'Access-Control-Allow-Headers: {{ $cors.CorsAllowHeaders }}'; + more_set_headers 'Access-Control-Max-Age: {{ $cors.CorsMaxAge }}'; + more_set_headers 'Content-Type: text/plain charset=UTF-8'; + more_set_headers 'Content-Length: 0'; return 204; } - add_header 'Access-Control-Allow-Origin' '{{ $cors.CorsAllowOrigin }}' always; - {{ if $cors.CorsAllowCredentials }} add_header 'Access-Control-Allow-Credentials' '{{ $cors.CorsAllowCredentials }}' always; {{ end }} - add_header 'Access-Control-Allow-Methods' '{{ $cors.CorsAllowMethods }}' always; - add_header 'Access-Control-Allow-Headers' '{{ $cors.CorsAllowHeaders }}' always; + more_set_headers 'Access-Control-Allow-Origin: {{ $cors.CorsAllowOrigin }}'; + {{ if $cors.CorsAllowCredentials }} more_set_headers 'Access-Control-Allow-Credentials: {{ $cors.CorsAllowCredentials }}'; {{ end }} + more_set_headers 'Access-Control-Allow-Methods: {{ $cors.CorsAllowMethods }}'; + more_set_headers 'Access-Control-Allow-Headers: {{ $cors.CorsAllowHeaders }}'; {{ end }} @@ -571,49 +805,59 @@ stream { {{ $all := .First }} {{ $server := .Second }} {{ range $address := $all.Cfg.BindAddressIpv4 }} - listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}}; + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}}; {{ else }} - listen {{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize 
}}{{end}}; + listen {{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}}; {{ end }} {{ if $all.IsIPV6Enabled }} {{ range $address := $all.Cfg.BindAddressIpv6 }} - listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{ end }}; + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{ end }}; {{ else }} - listen [::]:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{ end }}; + listen [::]:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{ end }}; {{ end }} {{ end }} set $proxy_upstream_name "-"; + set $pass_access_scheme $scheme; + set $pass_server_port $server_port; + set $best_http_host $http_host; + set $pass_port $pass_server_port; {{/* Listen on {{ $all.ListenPorts.SSLProxy }} because port {{ $all.ListenPorts.HTTPS }} is used in the TLS sni server */}} {{/* This listener must always have proxy_protocol enabled, because the SNI listener forwards on source IP info in it. */}} - {{ if not (empty $server.SSLCertificate) }} + {{ if not (empty $server.SSLCert.PemFileName) }} {{ range $address := $all.Cfg.BindAddressIpv4 }} - listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; {{ else }} - listen {{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + listen {{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; {{ end }} {{ if $all.IsIPV6Enabled }} {{ range $address := $all.Cfg.BindAddressIpv6 }} - {{ if not (empty $server.SSLCertificate) }}listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ 
if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ if not (empty $server.SSLCert.PemFileName) }}listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; {{ else }} - {{ if not (empty $server.SSLCertificate) }}listen [::]:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ if not (empty $server.SSLCert.PemFileName) }}listen [::]:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; {{ end }} {{ end }} {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}} - # PEM sha: {{ $server.SSLPemChecksum }} - ssl_certificate {{ $server.SSLCertificate }}; - ssl_certificate_key {{ $server.SSLCertificate }}; - {{ if not (empty $server.SSLFullChainCertificate)}} - ssl_trusted_certificate {{ $server.SSLFullChainCertificate }}; + # PEM sha: {{ $server.SSLCert.PemSHA }} + ssl_certificate {{ $server.SSLCert.PemFileName }}; + ssl_certificate_key {{ $server.SSLCert.PemFileName }}; + {{ if not (empty $server.SSLCert.FullChainPemFileName)}} + ssl_trusted_certificate {{ $server.SSLCert.FullChainPemFileName }}; ssl_stapling on; ssl_stapling_verify on; {{ end }} - {{ end }} - {{ if (and (not (empty $server.SSLCertificate)) $all.Cfg.HSTS) }} - more_set_headers "Strict-Transport-Security: max-age={{ $all.Cfg.HSTSMaxAge }}{{ if $all.Cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }};{{ if $all.Cfg.HSTSPreload }} preload{{ end }}"; + {{ if $all.DynamicCertificatesEnabled}} + ssl_certificate_by_lua_block { + certificate.call() + } + {{ end }} {{ end }} + {{ if not (empty $server.AuthTLSError) }} + # {{ $server.AuthTLSError }} + return 403; + {{ else }} {{ if not (empty $server.CertificateAuth.CAFileName) }} # PEM sha: {{ $server.CertificateAuth.PemSHA }} @@ -625,13 +869,30 @@ stream { {{ end }} {{ end }} + {{ if not (empty $server.SSLCiphers) }} + ssl_ciphers {{ $server.SSLCiphers }}; + {{ end }} + {{ if not (empty $server.ServerSnippet) }} {{ $server.ServerSnippet }} {{ end }} + {{ range $errorLocation := (buildCustomErrorLocationsPerServer $server) }} + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps $errorLocation.UpstreamName $errorLocation.Codes $all.EnableMetrics) }} + {{ end }} + + + {{ $enforceRegex := enforceRegexModifier $server.Locations }} {{ range $location := $server.Locations }} - {{ $path := buildLocation $location }} - {{ $authPath := buildAuthLocation $location }} + {{ $path := buildLocation $location $enforceRegex }} + {{ $proxySetHeader := 
proxySetHeader $location }} + {{ $authPath := buildAuthLocation $location $all.Cfg.GlobalExternalAuth.URL }} + {{ $applyGlobalAuth := shouldApplyGlobalAuth $location $all.Cfg.GlobalExternalAuth.URL }} + + {{ $externalAuth := $location.ExternalAuth }} + {{ if eq $applyGlobalAuth true }} + {{ $externalAuth = $all.Cfg.GlobalExternalAuth }} + {{ end }} {{ if not (empty $location.Rewrite.AppRoot)}} if ($uri = /) { @@ -639,98 +900,242 @@ stream { } {{ end }} - {{ if not (empty $authPath) }} + {{ if $authPath }} location = {{ $authPath }} { internal; - set $proxy_upstream_name "external-authentication"; + + # ngx_auth_request module overrides variables in the parent request, + # therefore we have to explicitly set this variable again so that when the parent request + # resumes it has the correct value set for this variable so that Lua can pick backend correctly + set $proxy_upstream_name "{{ buildUpstreamName $location }}"; proxy_pass_request_body off; proxy_set_header Content-Length ""; + proxy_set_header X-Forwarded-Proto ""; - {{ if not (empty $location.ExternalAuth.Method) }} - proxy_method {{ $location.ExternalAuth.Method }}; + {{ if $externalAuth.Method }} + proxy_method {{ $externalAuth.Method }}; proxy_set_header X-Original-URI $request_uri; proxy_set_header X-Scheme $pass_access_scheme; {{ end }} - proxy_set_header Host {{ $location.ExternalAuth.Host }}; + proxy_set_header Host {{ $externalAuth.Host }}; proxy_set_header X-Original-URL $scheme://$http_host$request_uri; proxy_set_header X-Original-Method $request_method; - proxy_set_header X-Auth-Request-Redirect $request_uri; proxy_set_header X-Sent-From "nginx-ingress-controller"; + proxy_set_header X-Real-IP $the_real_ip; + {{ if and $all.Cfg.UseForwardedHeaders $all.Cfg.ComputeFullForwardedFor }} + proxy_set_header X-Forwarded-For $full_x_forwarded_for; + {{ else }} + proxy_set_header X-Forwarded-For $the_real_ip; + {{ end }} + {{ if $externalAuth.RequestRedirect }} + proxy_set_header X-Auth-Request-Redirect {{ $externalAuth.RequestRedirect }}; + {{ else }} + proxy_set_header X-Auth-Request-Redirect $request_uri; + {{ end }} + + proxy_buffering {{ $location.Proxy.ProxyBuffering }}; + proxy_buffer_size {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; + proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; + + proxy_http_version 1.1; proxy_ssl_server_name on; proxy_pass_request_headers on; - client_max_body_size "{{ $location.Proxy.BodySize }}"; - {{ if isValidClientBodyBufferSize $location.ClientBodyBufferSize }} + {{ if isValidByteSize $location.Proxy.BodySize true }} + client_max_body_size {{ $location.Proxy.BodySize }}; + {{ end }} + {{ if isValidByteSize $location.ClientBodyBufferSize false }} client_body_buffer_size {{ $location.ClientBodyBufferSize }}; {{ end }} - set $target {{ $location.ExternalAuth.URL }}; + # Pass the extracted client certificate to the auth provider + {{ if not (empty $server.CertificateAuth.CAFileName) }} + {{ if $server.CertificateAuth.PassCertToUpstream }} + proxy_set_header ssl-client-cert $ssl_client_escaped_cert; + {{ end }} + proxy_set_header ssl-client-verify $ssl_client_verify; + proxy_set_header ssl-client-subject-dn $ssl_client_s_dn; + proxy_set_header ssl-client-issuer-dn $ssl_client_i_dn; + {{ end }} + + {{ if not (empty $externalAuth.AuthSnippet) }} + {{ $externalAuth.AuthSnippet }} + {{ end }} + + set $target {{ $externalAuth.URL }}; proxy_pass $target; } {{ end }} - location {{ $path }} { - {{ if 
$all.Cfg.EnableVtsStatus }}{{ if $location.VtsFilterKey }} vhost_traffic_status_filter_by_set_key {{ $location.VtsFilterKey }};{{ end }}{{ end }} - - set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location }}"; - {{ $ing := (getIngressInformation $location.Ingress $path) }} - {{/* $ing.Metadata contains the Ingress metadata */}} + location {{ $path }} { + {{ $ing := (getIngressInformation $location.Ingress $server.Hostname $location.Path) }} set $namespace "{{ $ing.Namespace }}"; set $ingress_name "{{ $ing.Rule }}"; set $service_name "{{ $ing.Service }}"; + set $service_port "{{ $location.Port }}"; + set $location_path "{{ $location.Path | escapeLiteralDollar }}"; + + {{ if $all.Cfg.EnableOpentracing }} + {{ opentracingPropagateContext $location }}; + {{ end }} + + rewrite_by_lua_block { + lua_ingress.rewrite({{ locationConfigForLua $location $server $all }}) + balancer.rewrite() + plugins.run() + } + + {{ if shouldConfigureLuaRestyWAF $all.Cfg.DisableLuaRestyWAF $location.LuaRestyWAF.Mode }} + # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any + # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)` + # that means currently `satisfy any` and lua-resty-waf together will potentially render any + # other authentication method such as basic auth or external auth useless - all requests will be allowed. + access_by_lua_block { + local lua_resty_waf = require("resty.waf") + local waf = lua_resty_waf:new() + + waf:set_option("mode", "{{ $location.LuaRestyWAF.Mode }}") + waf:set_option("storage_zone", "waf_storage") - {{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Rewrite.SSLRedirect)) }} - # enforce ssl on server side - if ($pass_access_scheme = http) { - {{ if ne $all.ListenPorts.HTTPS 443 }} - {{ $redirect_port := (printf ":%v" $all.ListenPorts.HTTPS) }} - return {{ $all.Cfg.HTTPRedirectCode }} https://$best_http_host{{ $redirect_port }}$request_uri; + {{ if $location.LuaRestyWAF.AllowUnknownContentTypes }} + waf:set_option("allow_unknown_content_types", true) {{ else }} - return {{ $all.Cfg.HTTPRedirectCode }} https://$best_http_host$request_uri; + waf:set_option("allowed_content_types", { "text/html", "text/json", "application/json" }) + {{ end }} + + waf:set_option("event_log_level", ngx.WARN) + + {{ if gt $location.LuaRestyWAF.ScoreThreshold 0 }} + waf:set_option("score_threshold", {{ $location.LuaRestyWAF.ScoreThreshold }}) + {{ end }} + + {{ if not $location.LuaRestyWAF.ProcessMultipartBody }} + waf:set_option("process_multipart_body", false) + {{ end }} + + {{ if $location.LuaRestyWAF.Debug }} + waf:set_option("debug", true) + waf:set_option("event_log_request_arguments", true) + waf:set_option("event_log_request_body", true) + waf:set_option("event_log_request_headers", true) + waf:set_option("req_tid_header", true) + waf:set_option("res_tid_header", true) + {{ end }} + + {{ range $ruleset := $location.LuaRestyWAF.IgnoredRuleSets }} + waf:set_option("ignore_ruleset", "{{ $ruleset }}") + {{ end }} + + {{ if gt (len $location.LuaRestyWAF.ExtraRulesetString) 0 }} + waf:set_option("add_ruleset_string", "10000_extra_rules", {{ $location.LuaRestyWAF.ExtraRulesetString }}) + {{ end }} + + waf:exec() + } + {{ end }} + + header_filter_by_lua_block { + {{ if shouldConfigureLuaRestyWAF $all.Cfg.DisableLuaRestyWAF $location.LuaRestyWAF.Mode }} + local lua_resty_waf = require "resty.waf" + local waf = lua_resty_waf:new() +
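To make the `satisfy any` caveat above concrete: an access-phase Lua handler that simply returns counts as an "allow" vote, so with `satisfy any` it can short-circuit basic and external auth. A handler that does not want to vote "allow" must decline explicitly. A minimal Lua sketch (illustrative only, not part of the generated config; request_is_suspicious is an assumed helper):

    access_by_lua_block {
        if request_is_suspicious() then  -- assumed helper for this sketch
            ngx.exit(ngx.HTTP_FORBIDDEN)  -- hard deny
        end
        ngx.exit(ngx.DECLINED)  -- defer to the remaining access handlers
    }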
waf:exec() + {{ end }} + + plugins.run() + } + body_filter_by_lua_block { + {{ if shouldConfigureLuaRestyWAF $all.Cfg.DisableLuaRestyWAF $location.LuaRestyWAF.Mode }} + local lua_resty_waf = require "resty.waf" + local waf = lua_resty_waf:new() + waf:exec() {{ end }} } + + log_by_lua_block { + {{ if shouldConfigureLuaRestyWAF $all.Cfg.DisableLuaRestyWAF $location.LuaRestyWAF.Mode }} + local lua_resty_waf = require "resty.waf" + local waf = lua_resty_waf:new() + waf:exec() + {{ end }} + balancer.log() + {{ if $all.EnableMetrics }} + monitor.call() + {{ end }} + + plugins.run() + } + + {{ if (and (not (empty $server.SSLCert.PemFileName)) $all.Cfg.HSTS) }} + if ($scheme = https) { + more_set_headers "Strict-Transport-Security: max-age={{ $all.Cfg.HSTSMaxAge }}{{ if $all.Cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }}{{ if $all.Cfg.HSTSPreload }}; preload{{ end }}"; + } + {{ end }} + + + {{ if not $location.Logs.Access }} + access_log off; + {{ end }} + + {{ if $location.Logs.Rewrite }} + rewrite_log on; + {{ end }} + + {{ if $location.HTTP2PushPreload }} + http2_push_preload on; {{ end }} - {{ if $all.Cfg.EnableModsecurity }} + port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; + + set $proxy_upstream_name "{{ buildUpstreamName $location }}"; + set $proxy_host $proxy_upstream_name; + + {{ if (or $location.ModSecurity.Enable $all.Cfg.EnableModsecurity) }} + {{ if not $all.Cfg.EnableModsecurity }} modsecurity on; modsecurity_rules_file /etc/nginx/modsecurity/modsecurity.conf; - {{ if $all.Cfg.EnableOWASPCoreRules }} + {{ end }} + + {{ if $location.ModSecurity.Snippet }} + modsecurity_rules ' + {{ $location.ModSecurity.Snippet }} + '; + {{ else if (and ((not $all.Cfg.EnableOWASPCoreRules) $location.ModSecurity.OWASPRules))}} modsecurity_rules_file /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf; {{ end }} + + {{ if (not (empty $location.ModSecurity.TransactionID)) }} + modsecurity_transaction_id "{{ $location.ModSecurity.TransactionID }}"; + {{ end }} {{ end }} {{ if isLocationAllowed $location }} {{ if gt (len $location.Whitelist.CIDR) 0 }} - if ({{ buildDenyVariable (print $server.Hostname "_" $path) }}) { - return 403; - } + {{ range $ip := $location.Whitelist.CIDR }} + allow {{ $ip }};{{ end }} + deny all; {{ end }} - port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; - - {{ if not (empty $authPath) }} + {{ if not (isLocationInLocationList $location $all.Cfg.NoAuthLocations) }} + {{ if $authPath }} # this location requires authentication auth_request {{ $authPath }}; auth_request_set $auth_cookie $upstream_http_set_cookie; add_header Set-Cookie $auth_cookie; - {{- range $idx, $line := buildAuthResponseHeaders $location }} + {{- range $line := buildAuthResponseHeaders $externalAuth.ResponseHeaders }} {{ $line }} {{- end }} {{ end }} - {{ if not (empty $location.ExternalAuth.SigninURL) }} - error_page 401 = {{ buildAuthSignURL $location.ExternalAuth.SigninURL }}; + {{ if $externalAuth.SigninURL }} + set_escape_uri $escaped_request_uri $request_uri; + error_page 401 = {{ buildAuthSignURL $externalAuth.SigninURL }}; {{ end }} - {{/* if the location contains a rate limit annotation, create one */}} - {{ $limits := buildRateLimit $location }} - {{ range $limit := $limits }} - {{ $limit }}{{ end }} - {{ if $location.BasicDigestAuth.Secured }} {{ if eq $location.BasicDigestAuth.Type "basic" }} auth_basic "{{ $location.BasicDigestAuth.Realm }}"; @@ -741,86 +1146,92 @@ stream { {{ end }} proxy_set_header Authorization ""; {{ 
end }} + {{ end }} + + {{/* if the location contains a rate limit annotation, create one */}} + {{ $limits := buildRateLimit $location }} + {{ range $limit := $limits }} + {{ $limit }}{{ end }} {{ if $location.CorsConfig.CorsEnabled }} {{ template "CORS" $location }} {{ end }} + {{ buildInfluxDB $location.InfluxDB }} + {{ if not (empty $location.Redirect.URL) }} - if ($uri ~* {{ $path }}) { + if ($uri ~* {{ stripLocationModifer $path }}) { return {{ $location.Redirect.Code }} {{ $location.Redirect.URL }}; } {{ end }} - client_max_body_size "{{ $location.Proxy.BodySize }}"; - {{ if isValidClientBodyBufferSize $location.ClientBodyBufferSize }} + {{ if isValidByteSize $location.Proxy.BodySize true }} + client_max_body_size {{ $location.Proxy.BodySize }}; + {{ end }} + {{ if isValidByteSize $location.ClientBodyBufferSize false }} client_body_buffer_size {{ $location.ClientBodyBufferSize }}; {{ end }} {{/* By default use vhost as Host to upstream, but allow overrides */}} {{ if not (empty $location.UpstreamVhost) }} - proxy_set_header Host "{{ $location.UpstreamVhost }}"; + {{ $proxySetHeader }} Host "{{ $location.UpstreamVhost }}"; {{ else }} - proxy_set_header Host $best_http_host; + {{ $proxySetHeader }} Host $best_http_host; {{ end }} - # Pass the extracted client certificate to the backend {{ if not (empty $server.CertificateAuth.CAFileName) }} {{ if $server.CertificateAuth.PassCertToUpstream }} - proxy_set_header ssl-client-cert $ssl_client_escaped_cert; - {{ else }} - proxy_set_header ssl-client-cert ""; + {{ $proxySetHeader }} ssl-client-cert $ssl_client_escaped_cert; {{ end }} - proxy_set_header ssl-client-verify $ssl_client_verify; - proxy_set_header ssl-client-dn $ssl_client_s_dn; - {{ else }} - proxy_set_header ssl-client-cert ""; - proxy_set_header ssl-client-verify ""; - proxy_set_header ssl-client-dn ""; + {{ $proxySetHeader }} ssl-client-verify $ssl_client_verify; + {{ $proxySetHeader }} ssl-client-subject-dn $ssl_client_s_dn; + {{ $proxySetHeader }} ssl-client-issuer-dn $ssl_client_i_dn; {{ end }} # Allow websocket connections - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; + {{ $proxySetHeader }} Upgrade $http_upgrade; + {{ if $location.Connection.Enabled}} + {{ $proxySetHeader }} Connection {{ $location.Connection.Header }}; + {{ else }} + {{ $proxySetHeader }} Connection $connection_upgrade; + {{ end }} - proxy_set_header X-Real-IP $the_real_ip; - {{ if $all.Cfg.ComputeFullForwardedFor }} - proxy_set_header X-Forwarded-For $full_x_forwarded_for; + {{ $proxySetHeader }} X-Request-ID $req_id; + {{ $proxySetHeader }} X-Real-IP $the_real_ip; + {{ if and $all.Cfg.UseForwardedHeaders $all.Cfg.ComputeFullForwardedFor }} + {{ $proxySetHeader }} X-Forwarded-For $full_x_forwarded_for; {{ else }} - proxy_set_header X-Forwarded-For $the_real_ip; + {{ $proxySetHeader }} X-Forwarded-For $the_real_ip; + {{ end }} + {{ $proxySetHeader }} X-Forwarded-Host $best_http_host; + {{ $proxySetHeader }} X-Forwarded-Port $pass_port; + {{ $proxySetHeader }} X-Forwarded-Proto $pass_access_scheme; + {{ if $all.Cfg.ProxyAddOriginalURIHeader }} + {{ $proxySetHeader }} X-Original-URI $request_uri; {{ end }} - proxy_set_header X-Forwarded-Host $best_http_host; - proxy_set_header X-Forwarded-Port $pass_port; - proxy_set_header X-Forwarded-Proto $pass_access_scheme; - proxy_set_header X-Original-URI $request_uri; - proxy_set_header X-Scheme $pass_access_scheme; + {{ $proxySetHeader }} X-Scheme $pass_access_scheme; # Pass the original X-Forwarded-For - 
proxy_set_header X-Original-Forwarded-For {{ buildForwardedFor $all.Cfg.ForwardedForHeader }}; + {{ $proxySetHeader }} X-Original-Forwarded-For {{ buildForwardedFor $all.Cfg.ForwardedForHeader }}; # mitigate HTTPoxy Vulnerability # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ - proxy_set_header Proxy ""; + {{ $proxySetHeader }} Proxy ""; # Custom headers to proxied server {{ range $k, $v := $all.ProxySetHeaders }} - proxy_set_header {{ $k }} "{{ $v }}"; + {{ $proxySetHeader }} {{ $k }} "{{ $v }}"; {{ end }} proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; proxy_send_timeout {{ $location.Proxy.SendTimeout }}s; proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s; - {{ if (or (eq $location.Proxy.ProxyRedirectFrom "default") (eq $location.Proxy.ProxyRedirectFrom "off")) }} - proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }}; - {{ else }} - proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }} {{ $location.Proxy.ProxyRedirectTo }}; - {{ end }} - proxy_buffering off; - proxy_buffer_size "{{ $location.Proxy.BufferSize }}"; - proxy_buffers 4 "{{ $location.Proxy.BufferSize }}"; - proxy_request_buffering "{{ $location.Proxy.RequestBuffering }}"; + proxy_buffering {{ $location.Proxy.ProxyBuffering }}; + proxy_buffer_size {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; + proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; proxy_http_version 1.1; @@ -829,11 +1240,8 @@ stream { # In case of errors try the next upstream server before returning an error proxy_next_upstream {{ buildNextUpstream $location.Proxy.NextUpstream $all.Cfg.RetryNonIdempotent }}; - - {{/* rewrite only works if the content is not compressed */}} - {{ if $location.Rewrite.AddBaseURL }} - proxy_set_header Accept-Encoding ""; - {{ end }} + proxy_next_upstream_timeout {{ $location.Proxy.NextUpstreamTimeout }}; + proxy_next_upstream_tries {{ $location.Proxy.NextUpstreamTries }}; {{/* Add any additional configuration defined */}} {{ $location.ConfigurationSnippet }} @@ -850,26 +1258,44 @@ stream { proxy_set_header X-Namespace $namespace; proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; + proxy_set_header X-Request-ID $req_id; + {{ end }} + + {{ if $location.Satisfy }} + satisfy {{ $location.Satisfy }}; + {{ end }} + + {{/* if a location-specific error override is set, add the proxy_intercept here */}} + {{ if $location.CustomHTTPErrors }} + # Custom error pages per ingress + proxy_intercept_errors on; {{ end }} + {{ range $errCode := $location.CustomHTTPErrors }} + error_page {{ $errCode }} = @custom_{{ $location.DefaultBackendUpstreamName }}_{{ $errCode }};{{ end }} - {{ if not (empty $location.Backend) }} {{ buildProxyPass $server.Hostname $all.Backends $location }} - {{ else }} - # No endpoints available for the request - return 503; + {{ if (or (eq $location.Proxy.ProxyRedirectFrom "default") (eq $location.Proxy.ProxyRedirectFrom "off")) }} + proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }}; + {{ else if not (eq $location.Proxy.ProxyRedirectTo "off") }} + proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }} {{ $location.Proxy.ProxyRedirectTo }}; {{ end }} {{ else }} - # Location denied. Reason: {{ $location.Denied }} + # Location denied. 
Reason: {{ $location.Denied | printf "%q" }} return 503; {{ end }} } - + {{ end }} {{ end }} {{ if eq $server.Hostname "_" }} # health checks in cloud providers require the use of port {{ $all.ListenPorts.HTTP }} location {{ $all.HealthzURI }} { + {{ if $all.Cfg.EnableOpentracing }} + opentracing off; + {{ end }} + access_log off; return 200; } @@ -877,8 +1303,18 @@ stream { # this is required to avoid error if nginx is being monitored # with an external software (like sysdig) location /nginx_status { - allow 127.0.0.1; - {{ if $all.IsIPV6Enabled }}allow ::1;{{ end }} + {{ if $all.Cfg.EnableOpentracing }} + opentracing off; + {{ end }} + + {{ range $v := $all.NginxStatusIpv4Whitelist }} + allow {{ $v }}; + {{ end }} + {{ if $all.IsIPV6Enabled -}} + {{ range $v := $all.NginxStatusIpv6Whitelist }} + allow {{ $v }}; + {{ end }} + {{ end -}} deny all; access_log off; diff --git a/rootfs/ingress-controller/clean-nginx-conf.sh b/rootfs/ingress-controller/clean-nginx-conf.sh index 91ff6bcca3..07900981e1 100755 --- a/rootfs/ingress-controller/clean-nginx-conf.sh +++ b/rootfs/ingress-controller/clean-nginx-conf.sh @@ -21,4 +21,7 @@ # 1. remove the return carrier character/s # 2. remove empty lines # 3. replace multiple empty lines -sed -e 's/\r//g' | sed -e 's/^ *$/\'$'\n/g' | sed -e '/^$/{N;/^\n$/D;}' + +SCRIPT_ROOT=$(dirname ${BASH_SOURCE}) + +sed -e 's/\r//g' | sed -e 's/^ *$/\'$'\n/g' | sed -e '/^$/{N;/^\n$/D;}' | ${SCRIPT_ROOT}/indent.sh diff --git a/rootfs/ingress-controller/indent.sh b/rootfs/ingress-controller/indent.sh new file mode 100755 index 0000000000..83c0f00057 --- /dev/null +++ b/rootfs/ingress-controller/indent.sh @@ -0,0 +1,22 @@ +#!/usr/bin/awk -f + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
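The awk program below re-indents the stream that clean-nginx-conf.sh pipes into it: strip any existing leading whitespace, track `{`/`}` nesting depth, and print each line indented by its depth (an opening line sits at its enclosing level). A rough Go rendering of the same logic, for illustration only; the shipped implementation is the awk one-liner that follows:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    func main() {
        depth := 0
        sc := bufio.NewScanner(os.Stdin)
        for sc.Scan() {
            line := strings.TrimLeft(sc.Text(), " \t") // awk: sub(/^[ \t]+/,"")
            opens := strings.Contains(line, "{")       // awk: /\{/{ctx++;idx=1}
            closes := strings.Contains(line, "}")      // awk: /\}/{ctx--}
            if opens {
                depth++
            }
            if closes {
                depth--
            }
            level := depth
            if opens {
                level-- // an opening line is printed at its enclosing depth
            }
            if level < 0 {
                level = 0
            }
            fmt.Println(strings.Repeat("    ", level) + line)
        }
    }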
+ +# Credits to https://evasive.ru/f29bd7ebacf24a50c582f973a55eee28.html + +{sub(/^[ \t]+/,"");idx=0} +/\{/{ctx++;idx=1} +/\}/{ctx--} +{id="";for(i=idx;i<ctx;i++)id=id"    ";print id $0} diff --git a/test/e2e-image/overlay/deployment-e2e.yaml b/test/e2e-image/overlay/deployment-e2e.yaml new file mode 100644 --- /dev/null +++ b/test/e2e-image/overlay/deployment-e2e.yaml ... > /proc/sys/kernel/core_pattern +	  sysctl -w fs.suid_dumpable=2 +        securityContext: +          privileged: true +      containers: +        - name: nginx-ingress-controller +          livenessProbe: +            timeoutSeconds: 1 +          readinessProbe: +            timeoutSeconds: 1 diff --git a/test/e2e-image/overlay/deployment-extension-group-patch.yaml b/test/e2e-image/overlay/deployment-extension-group-patch.yaml new file mode 100644 index 0000000000..837a5f7e12 --- /dev/null +++ b/test/e2e-image/overlay/deployment-extension-group-patch.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /apiVersion + value: extensions/v1beta1 diff --git a/test/e2e-image/overlay/deployment-namespace-patch.yaml b/test/e2e-image/overlay/deployment-namespace-patch.yaml new file mode 100644 index 0000000000..f0f1fddd8a --- /dev/null +++ b/test/e2e-image/overlay/deployment-namespace-patch.yaml @@ -0,0 +1,3 @@ +- op: add + path: /spec/template/spec/containers/0/args/-1 + value: "--watch-namespace=$(POD_NAMESPACE)" diff --git a/test/e2e-image/overlay/kustomization.yaml b/test/e2e-image/overlay/kustomization.yaml new file mode 100644 index 0000000000..8c64132e69 --- /dev/null +++ b/test/e2e-image/overlay/kustomization.yaml @@ -0,0 +1,40 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../../../deploy/cloud-generic +configMapGenerator: +- name: nginx-configuration + behavior: merge + literals: + - worker-processes=1 +patchesStrategicMerge: +- deployment-e2e.yaml +- service-protocol-tcp.yaml +patchesJson6902: +- path: deployment-namespace-patch.yaml + target: + group: apps + kind: Deployment + name: nginx-ingress-controller + version: v1 +- path: service-cluster-patch.yaml + target: + kind: Service + name: ingress-nginx + version: v1 +- path: deployment-extension-group-patch.yaml + target: + group: apps + kind: Deployment + name: nginx-ingress-controller + version: v1 +- path: role.yaml + target: + group: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role + version: v1beta1 +images: +- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + newName: ingress-controller/nginx-ingress-controller + newTag: dev diff --git a/test/e2e-image/overlay/role.yaml b/test/e2e-image/overlay/role.yaml new file mode 100644 index 0000000000..5e1430e939 --- /dev/null +++ b/test/e2e-image/overlay/role.yaml @@ -0,0 +1,3 @@ +- op: add + path: /rules/1/resourceNames/-1 + value: "ingress-controller-leader-testclass" diff --git a/test/e2e-image/overlay/service-cluster-patch.yaml b/test/e2e-image/overlay/service-cluster-patch.yaml new file mode 100644 index 0000000000..0465d38046 --- /dev/null +++ b/test/e2e-image/overlay/service-cluster-patch.yaml @@ -0,0 +1,4 @@ +- op: remove + path: /spec/externalTrafficPolicy +- op: remove + path: /spec/type diff --git a/test/e2e-image/overlay/service-protocol-tcp.yaml b/test/e2e-image/overlay/service-protocol-tcp.yaml new file mode 100644 index 0000000000..c49626fcd1 --- /dev/null +++ b/test/e2e-image/overlay/service-protocol-tcp.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx +spec: + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP diff --git a/test/e2e-prow/Dockerfile b/test/e2e-prow/Dockerfile new file mode 100644 index 0000000000..ffa6963676 --- /dev/null +++ b/test/e2e-prow/Dockerfile @@ -0,0 +1,70 @@ +# Copyright 2016 The Kubernetes
Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file creates a build environment for building and running kubernetes +# unit and integration tests + +FROM gcr.io/k8s-testimages/bootstrap + +# hint to kubetest that it is in CI +ENV KUBETEST_IN_DOCKER="true" + +# Go standard envs +ENV GOPATH /go +ENV PATH /usr/local/go/bin:$PATH + +RUN mkdir -p /go/bin +ENV PATH $GOPATH/bin:$PATH + +# setup k8s repo symlink +RUN mkdir -p /go/src/k8s.io/kubernetes \ + && ln -s /go/src/k8s.io/kubernetes /workspace/kubernetes + +# preinstall: +# - graphviz package for graphing profiles +# - bc for shell to junit +# - rpm for building RPMs with Bazel +RUN apt-get update && \ + apt-get install -y bc \ + rpm && \ + rm -rf /var/lib/apt/lists/* + +ARG K8S_RELEASE +ARG ETCD_VERSION + +RUN wget https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kubectl -O /usr/local/bin/kubectl \ + && chmod +x /usr/local/bin/kubectl + +RUN wget https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kube-apiserver -O /usr/local/bin/kube-apiserver \ + && chmod +x /usr/local/bin/kube-apiserver + +RUN curl -L https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz \ + && mkdir -p /tmp/etcd-download \ + && tar xzvf /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1 \ + && cp /tmp/etcd-download/etcd /usr/local/bin \ + && rm -rf /tmp/etcd-download + +# install go +ENV GO_VERSION 1.12.5 +ENV GO_TARBALL "go${GO_VERSION}.linux-amd64.tar.gz" +RUN wget -q "https://storage.googleapis.com/golang/${GO_TARBALL}" && \ + tar xzf "${GO_TARBALL}" -C /usr/local && \ + rm "${GO_TARBALL}" + +RUN go get github.com/onsi/ginkgo/ginkgo \ + && go get golang.org/x/lint/golint \ + && GO111MODULE="on" go get -u sigs.k8s.io/kind@master \ + && rm -rf /go/src/github.com + +ENV KUBEBUILDER_ASSETS /usr/local/bin diff --git a/test/e2e-prow/Makefile b/test/e2e-prow/Makefile new file mode 100644 index 0000000000..a072e70140 --- /dev/null +++ b/test/e2e-prow/Makefile @@ -0,0 +1,19 @@ +TAG ?=v$(shell date +%m%d%Y)-$(shell git rev-parse --short HEAD) +REGISTRY ?= quay.io/kubernetes-ingress-controller +DOCKER ?= docker + +IMAGE = $(REGISTRY)/e2e-prow + +all: docker-build docker-push + +docker-build: + $(DOCKER) build \ + --pull \ + --build-arg K8S_RELEASE=v1.14.1 \ + --build-arg ETCD_VERSION=v3.3.12 \ + -t $(IMAGE):$(TAG) . + +docker-push: + $(DOCKER) push $(IMAGE):$(TAG) + $(DOCKER) tag $(IMAGE):$(TAG) $(IMAGE):latest + $(DOCKER) push $(IMAGE):latest diff --git a/test/e2e/annotations/affinity.go b/test/e2e/annotations/affinity.go index 0f80dddde7..2f74c4764d 100644 --- a/test/e2e/annotations/affinity.go +++ b/test/e2e/annotations/affinity.go @@ -20,24 +20,24 @@ import ( "fmt" "net/http" "strings" + "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/parnurzeal/gorequest" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/test/e2e/framework" ) -var _ = framework.IngressNginxDescribe("Annotations - Affinity", func() { +var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", func() { f := framework.NewDefaultFramework("affinity") BeforeEach(func() { - err := f.NewEchoDeploymentWithReplicas(2) - Expect(err).NotTo(HaveOccurred()) + f.NewEchoDeploymentWithReplicas(2) }) AfterEach(func() { @@ -45,79 +45,126 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity", func() { It("should set sticky cookie SERVERID", func() { host := "sticky.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + } - ing, err := f.EnsureIngress(&v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - Annotations: map[string]string{ - "nginx.ingress.kubernetes.io/affinity": "cookie", - "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", - }, - }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return strings.Contains(server, "proxy_pass http://sticky-"+f.Namespace.Name+"-http-svc-80;") + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) - Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID=")) }) - It("should redirect to '/something' with enabled affinity", func() { - host := "example.com" + It("should change cookie name on ingress definition change", func() { + host := "change.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID")) + + ing.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/session-cookie-name"] = "OTHERCOOKIENAME" + f.EnsureIngress(ing) + + time.Sleep(waitForLuaSync) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("OTHERCOOKIENAME")) + }) + + It("should set the path to /something on the generated cookie", func() { + host := "path.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + } + + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/something"). + Set("Host", host). + End() - ing, err := f.EnsureIngress(&v1beta1.Ingress{ + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/something")) + }) + + It("does not set the path to / on the generated cookie if there's more than one rule referring to the same backend", func() { + host := "morethanonerule.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + } + + f.EnsureIngress(&extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - Annotations: map[string]string{ - "nginx.ingress.kubernetes.io/affinity": "cookie", - "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", - "nginx.ingress.kubernetes.io/rewrite-target": "/something", - }, + Name: host, + Namespace: f.Namespace, + Annotations: annotations, }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ { Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/something", + Backend: extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, { - Path: "/", - Backend: v1beta1.IngressBackend{ + Path: "/somewhereelese", + Backend: extensions.IngressBackend{ ServiceName: "http-svc", ServicePort: intstr.FromInt(80), }, @@ -130,23 +177,160 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity", func() { }, }) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/something"). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/something;")) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/somewhereelese"). + Set("Host", host). + End() - err = f.WaitForNginxServer(host, + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/somewhereelese;")) + }) + + It("should set cookie with expires", func() { + host := "cookieexpires.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "ExpiresCookie", + "nginx.ingress.kubernetes.io/session-cookie-expires": "172800", + "nginx.ingress.kubernetes.io/session-cookie-max-age": "259200", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(server string) bool { - return strings.Contains(server, "proxy_pass http://sticky-"+f.Namespace.Name+"-http-svc-80;") + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) - Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync) - resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + local, err := time.LoadLocation("GMT") + Expect(err).ToNot(HaveOccurred()) + Expect(local).ShouldNot(BeNil()) + + duration, _ := time.ParseDuration("48h") + expected := time.Now().In(local).Add(duration).Format("Mon, 02-Jan-06 15:04") + + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring(fmt.Sprintf("Expires=%s", expected))) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Max-Age=259200")) + }) + + It("should work with use-regex annotation and session-cookie-path", func() { + host := "useregex.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + "nginx.ingress.kubernetes.io/use-regex": "true", + "nginx.ingress.kubernetes.io/session-cookie-path": "/foo/bar", + } + + ing := framework.NewSingleIngress(host, "/foo/.*", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). + Set("Host", host). 
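The Expires assertion in the cookie-expires test above deserves a note: session-cookie-expires is given in seconds (172800 = 48h), NGINX stamps the cookie date in GMT, and the expected value is formatted only down to the minute, so a second of clock skew between the test and the controller cannot make the comparison flaky. Reduced to a standalone program:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        gmt, err := time.LoadLocation("GMT")
        if err != nil {
            panic(err)
        }
        duration, _ := time.ParseDuration("48h") // 172800s from the annotation
        expected := time.Now().In(gmt).Add(duration).Format("Mon, 02-Jan-06 15:04")
        fmt.Printf("Expires=%s\n", expected) // matched as a substring of Set-Cookie
    }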
+ End() + + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) - Expect(body).Should(ContainSubstring(fmt.Sprintf("request_uri=http://%v:8080/something/", host))) Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID=")) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/foo/bar")) + }) + + It("should warn user when use-regex is true and session-cookie-path is not set", func() { + host := "useregexwarn.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + "nginx.ingress.kubernetes.io/use-regex": "true", + } + + ing := framework.NewSingleIngress(host, "/foo/.*", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + logs, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(logs).To(ContainSubstring(`session-cookie-path should be set when use-regex is true`)) + }) + + It("should not set affinity across all server locations when using separate ingresses", func() { + host := "separate.foo.com" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + } + ing1 := framework.NewSingleIngress("ingress1", "/foo/bar", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing1) + + ing2 := framework.NewSingleIngress("ingress2", "/foo", host, f.Namespace, "http-svc", 80, &map[string]string{}) + f.EnsureIngress(ing2) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `location /foo/bar`) && strings.Contains(server, `location /foo`) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo"). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(Equal("")) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/foo/bar")) }) }) diff --git a/test/e2e/annotations/alias.go b/test/e2e/annotations/alias.go index b7fcc9c1ed..96d281a300 100644 --- a/test/e2e/annotations/alias.go +++ b/test/e2e/annotations/alias.go @@ -24,10 +24,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/parnurzeal/gorequest" - v1beta1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/ingress-nginx/test/e2e/framework" ) @@ -35,8 +31,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { f := framework.NewDefaultFramework("alias") BeforeEach(func() { - err := f.NewEchoDeployment() - Expect(err).NotTo(HaveOccurred()) + f.NewEchoDeployment() }) AfterEach(func() { @@ -44,113 +39,57 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { It("should return status code 200 for host 'foo' and 404 for 'bar'", func() { host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - ing, err := f.EnsureIngress(&v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - }) - - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) - - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name foo")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name foo")) }) - Expect(err).NotTo(HaveOccurred()) resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) resp, body, errs = gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", "bar"). 
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) - Expect(body).Should(ContainSubstring("default backend - 404")) + Expect(body).Should(ContainSubstring("404 Not Found")) }) It("should return status code 200 for host 'foo' and 'bar'", func() { host := "foo" - ing, err := f.EnsureIngress(&v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - Annotations: map[string]string{ - "nginx.ingress.kubernetes.io/server-alias": "bar", - }, - }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - }) - - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) - - err = f.WaitForNginxServer(host, + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/server-alias": "bar", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name foo")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name foo")) }) - Expect(err).NotTo(HaveOccurred()) hosts := []string{"foo", "bar"} for _, host := range hosts { resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) } diff --git a/test/e2e/annotations/approot.go b/test/e2e/annotations/approot.go new file mode 100644 index 0000000000..8d5621b376 --- /dev/null +++ b/test/e2e/annotations/approot.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Approot", func() { + f := framework.NewDefaultFramework("approot") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should redirect to /foo", func() { + host := "approot.bar.com" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/app-root": "/foo", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`if ($uri = /) {`)) && + Expect(server).Should(ContainSubstring(`return 302 /foo;`)) + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusFound)) + Expect(resp.Header.Get("Location")).Should(Equal("http://approot.bar.com/foo")) + }) +}) diff --git a/test/e2e/annotations/auth.go b/test/e2e/annotations/auth.go index dfc64e4b6e..6a9dec4b43 100644 --- a/test/e2e/annotations/auth.go +++ b/test/e2e/annotations/auth.go @@ -19,26 +19,25 @@ package annotations import ( "fmt" "net/http" + "net/url" "os/exec" + "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/parnurzeal/gorequest" corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/test/e2e/framework" ) -var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { - f := framework.NewDefaultFramework("alias") +var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { + f := framework.NewDefaultFramework("auth") BeforeEach(func() { - err := f.NewEchoDeployment() - Expect(err).NotTo(HaveOccurred()) + f.NewEchoDeployment() }) AfterEach(func() { @@ -47,52 +46,48 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { It("should return status code 200 when no authentication is configured", func() { host := "auth" - ing, err := f.EnsureIngress(buildIngress(host, f.Namespace.Name)) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). 
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) }) It("should return status code 503 when authentication is configured with an invalid secret", func() { host := "auth" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": "something", + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + } - bi := buildIngress(host, f.Namespace.Name) - bi.Annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" - bi.Annotations["nginx.ingress.kubernetes.io/auth-secret"] = "something" - bi.Annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - ing, err := f.EnsureIngress(bi) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) - - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).Should(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusServiceUnavailable)) Expect(body).Should(ContainSubstring("503 Service Temporarily Unavailable")) }) @@ -100,33 +95,29 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { It("should return status code 401 when authentication is configured but Authorization header is not configured", func() { host := "auth" - s, err := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace.Name)) - Expect(err).NotTo(HaveOccurred()) - Expect(s).NotTo(BeNil()) - Expect(s.ObjectMeta).NotTo(BeNil()) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) - bi := buildIngress(host, f.Namespace.Name) - bi.Annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" - bi.Annotations["nginx.ingress.kubernetes.io/auth-secret"] = s.Name - bi.Annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": s.Name, + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + } - ing, err := f.EnsureIngress(bi) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). 
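Retry(10, 1*time.Second, http.StatusNotFound), added to most requests in this file, makes gorequest re-issue the request up to ten times at one-second intervals for as long as the response status is 404, bridging the window before the new Ingress is reconciled into nginx.conf. Spelled out on its own (the URL is a placeholder; the suite uses f.GetURL(framework.HTTP)):

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/parnurzeal/gorequest"
    )

    func main() {
        resp, _, errs := gorequest.New().
            Get("http://127.0.0.1/").                      // placeholder endpoint
            Retry(10, 1*time.Second, http.StatusNotFound). // retry while still 404
            Set("Host", "auth").
            End()
        if len(errs) > 0 {
            panic(errs[0])
        }
        fmt.Println(resp.StatusCode)
    }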
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusUnauthorized)) Expect(body).Should(ContainSubstring("401 Authorization Required")) }) @@ -134,34 +125,30 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { It("should return status code 401 when authentication is configured and Authorization header is sent with invalid credentials", func() { host := "auth" - s, err := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace.Name)) - Expect(err).NotTo(HaveOccurred()) - Expect(s).NotTo(BeNil()) - Expect(s.ObjectMeta).NotTo(BeNil()) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) - bi := buildIngress(host, f.Namespace.Name) - bi.Annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" - bi.Annotations["nginx.ingress.kubernetes.io/auth-secret"] = s.Name - bi.Annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": s.Name, + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + } - ing, err := f.EnsureIngress(bi) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, body, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("user", "pass"). 
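SetBasicAuth("user", "pass") simply sets the standard Authorization header; the 401 case above and the 200 case further down differ only in whether the encoded pair matches the htpasswd secret. What it puts on the wire:

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Equivalent of SetBasicAuth("user", "pass"):
        token := base64.StdEncoding.EncodeToString([]byte("user:pass"))
        fmt.Println("Authorization: Basic " + token) // Authorization: Basic dXNlcjpwYXNz
    }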
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusUnauthorized)) Expect(body).Should(ContainSubstring("401 Authorization Required")) }) @@ -169,45 +156,41 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { It("should return status code 200 when authentication is configured and Authorization header is sent", func() { host := "auth" - s, err := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace.Name)) - Expect(err).NotTo(HaveOccurred()) - Expect(s).NotTo(BeNil()) - Expect(s.ObjectMeta).NotTo(BeNil()) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) - bi := buildIngress(host, f.Namespace.Name) - bi.Annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" - bi.Annotations["nginx.ingress.kubernetes.io/auth-secret"] = s.Name - bi.Annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": s.Name, + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + } - ing, err := f.EnsureIngress(bi) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, _, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("foo", "bar"). 
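buildSecret, defined further down this file outside the hunk, has to produce the htpasswd-style secret that the auth-secret annotation points at (ingress-nginx reads it from the "auth" key). Given the os/exec import above, a plausible sketch; treat the openssl invocation and the exact layout as assumptions:

    import (
        "fmt"
        "os/exec"
        "strings"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Hypothetical reconstruction of the suite's buildSecret helper.
    func buildSecret(username, password, name, namespace string) *corev1.Secret {
        out, err := exec.Command("openssl", "passwd", "-crypt", password).CombinedOutput()
        if err != nil {
            panic(err)
        }
        htpasswd := fmt.Sprintf("%v:%s\n", username, strings.TrimSpace(string(out)))
        return &corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
            Data:       map[string][]byte{"auth": []byte(htpasswd)},
            Type:       corev1.SecretTypeOpaque,
        }
    }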
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) }) It("should return status code 500 when authentication is configured with invalid content and Authorization header is sent", func() { host := "auth" - s, err := f.EnsureSecret( + s := f.EnsureSecret( &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Namespace: f.Namespace.Name, + Namespace: f.Namespace, }, Data: map[string][]byte{ // invalid content @@ -216,35 +199,129 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { Type: corev1.SecretTypeOpaque, }, ) - Expect(err).NotTo(HaveOccurred()) - Expect(s).NotTo(BeNil()) - Expect(s.ObjectMeta).NotTo(BeNil()) - bi := buildIngress(host, f.Namespace.Name) - bi.Annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" - bi.Annotations["nginx.ingress.kubernetes.io/auth-secret"] = s.Name - bi.Annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": s.Name, + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + } - ing, err := f.EnsureIngress(bi) - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { - return Expect(server).Should(ContainSubstring("server_name auth")) && - Expect(server).ShouldNot(ContainSubstring("return 503")) + return Expect(server).Should(ContainSubstring("server_name auth")) }) - Expect(err).NotTo(HaveOccurred()) resp, _, errs := gorequest.New(). - Get(f.NginxHTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("foo", "bar"). 
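+ // The secret holds no valid htpasswd entry, so NGINX cannot verify any credentials and is expected to fail with a 500.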
End() - Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusInternalServerError)) }) + + It(`should set snippet "proxy_set_header My-Custom-Header 42;" when external auth is configured`, func() { + host := "auth" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-url": "http://foo.bar/basic-auth/user/password", + "nginx.ingress.kubernetes.io/auth-snippet": ` + proxy_set_header My-Custom-Header 42;`, + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`proxy_set_header My-Custom-Header 42;`)) + }) + }) + + It(`should not set snippet "proxy_set_header My-Custom-Header 42;" when external auth is not configured`, func() { + host := "auth" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-snippet": ` + proxy_set_header My-Custom-Header 42;`, + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).ShouldNot(ContainSubstring(`proxy_set_header My-Custom-Header 42;`)) + }) + }) + + Context("when external authentication is configured", func() { + host := "auth" + + BeforeEach(func() { + f.NewHttpbinDeployment() + + var httpbinIP string + + err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, "httpbin", f.Namespace, 1) + Expect(err).NotTo(HaveOccurred()) + + e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get("httpbin", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + httpbinIP = e.Subsets[0].Addresses[0].IP + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbinIP), + "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name auth")) + }) + }) + + It("should return status code 200 when signed in", func() { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). + SetBasicAuth("user", "password"). + End() + + for _, err := range errs { + Expect(err).NotTo(HaveOccurred()) + } + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should redirect to signin url when not signed in", func() { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). + RedirectPolicy(func(req gorequest.Request, via []gorequest.Request) error { + return http.ErrUseLastResponse + }). + Param("a", "b"). + Param("c", "d"). 
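+ // Returning http.ErrUseLastResponse keeps the client from following the redirect, so the 302 and its Location header (query string escaped into the rd parameter) can be asserted on directly.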
+ End() + + for _, err := range errs { + Expect(err).NotTo(HaveOccurred()) + } + Expect(resp.StatusCode).Should(Equal(http.StatusFound)) + Expect(resp.Header.Get("Location")).Should(Equal(fmt.Sprintf("http://%s/auth/start?rd=http://%s%s", host, host, url.QueryEscape("/?a=b&c=d")))) + }) + }) }) // TODO: test Digest Auth @@ -253,36 +330,6 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { // Auth ok // Auth error -func buildIngress(host, namespace string) *v1beta1.Ingress { - return &v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: namespace, - Annotations: map[string]string{}, - }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - } -} - func buildSecret(username, password, name, namespace string) *corev1.Secret { out, err := exec.Command("openssl", "passwd", "-crypt", password).CombinedOutput() encpass := fmt.Sprintf("%v:%s\n", username, out) diff --git a/test/e2e/annotations/authtls.go b/test/e2e/annotations/authtls.go new file mode 100644 index 0000000000..e78a3bc2c9 --- /dev/null +++ b/test/e2e/annotations/authtls.go @@ -0,0 +1,185 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "crypto/tls" + "fmt" + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { + f := framework.NewDefaultFramework("authtls") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should set valid auth-tls-secret", func() { + host := "authtls.foo.com" + nameSpace := f.Namespace + + clientConfig, err := framework.CreateIngressMASecret( + f.KubeClientSet, + host, + host, + nameSpace) + Expect(err).ToNot(HaveOccurred()) + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-tls-secret": nameSpace + "/" + host, + } + + f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, nameSpace, "http-svc", 80, &annotations)) + + assertSslClientCertificateConfig(f, host, "on", "1") + + // Send Request without Client Certs + req := gorequest.New() + uri := "/" + resp, _, errs := req. + Get(f.GetURL(framework.HTTPS)+uri). + TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusBadRequest)) + + // Send Request Passing the Client Certs + resp, _, errs = req. + Get(f.GetURL(framework.HTTPS)+uri). + TLSClientConfig(clientConfig). + Set("Host", host). 
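+ // With the client certificate from CreateIngressMASecret attached, ssl_verify_client succeeds and the backend should answer 200.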
+ End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should set valid auth-tls-secret, sslVerify to off, and sslVerifyDepth to 2", func() { + host := "authtls.foo.com" + nameSpace := f.Namespace + + _, err := framework.CreateIngressMASecret( + f.KubeClientSet, + host, + host, + nameSpace) + Expect(err).ToNot(HaveOccurred()) + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-tls-secret": nameSpace + "/" + host, + "nginx.ingress.kubernetes.io/auth-tls-verify-client": "off", + "nginx.ingress.kubernetes.io/auth-tls-verify-depth": "2", + } + + f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, nameSpace, "http-svc", 80, &annotations)) + + assertSslClientCertificateConfig(f, host, "off", "2") + + // Send Request without Client Certs + req := gorequest.New() + uri := "/" + resp, _, errs := req. + Get(f.GetURL(framework.HTTPS)+uri). + TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should set valid auth-tls-secret, pass certificate to upstream, and error page", func() { + host := "authtls.foo.com" + nameSpace := f.Namespace + + errorPath := "/error" + + clientConfig, err := framework.CreateIngressMASecret( + f.KubeClientSet, + host, + host, + nameSpace) + Expect(err).ToNot(HaveOccurred()) + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/auth-tls-secret": nameSpace + "/" + host, + "nginx.ingress.kubernetes.io/auth-tls-error-page": f.GetURL(framework.HTTP) + errorPath, + "nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream": "true", + } + + f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, nameSpace, "http-svc", 80, &annotations)) + + assertSslClientCertificateConfig(f, host, "on", "1") + + sslErrorPage := fmt.Sprintf("error_page 495 496 = %s;", f.GetURL(framework.HTTP)+errorPath) + sslUpstreamClientCert := "proxy_set_header ssl-client-cert $ssl_client_escaped_cert;" + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, sslErrorPage) && + strings.Contains(server, sslUpstreamClientCert) + }) + + // Send Request without Client Certs + req := gorequest.New() + uri := "/" + resp, _, errs := req. + Get(f.GetURL(framework.HTTPS)+uri). + TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). + Set("Host", host). + RedirectPolicy(noRedirectPolicyFunc). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusFound)) + Expect(resp.Header.Get("Location")).Should(Equal(f.GetURL(framework.HTTP) + errorPath)) + + // Send Request Passing the Client Certs + resp, _, errs = req. + Get(f.GetURL(framework.HTTPS)+uri). + TLSClientConfig(clientConfig). + Set("Host", host). 
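+ // Presenting the client certificate avoids the 495/496 error page configured above, so the request reaches the upstream and returns 200.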
+ End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) +}) + +func assertSslClientCertificateConfig(f *framework.Framework, host string, verifyClient string, verifyDepth string) { + sslCertDirective := "ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;" + sslKeyDirective := "ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;" + sslClientCertDirective := fmt.Sprintf("ssl_client_certificate /etc/ingress-controller/ssl/%s-%s.pem;", f.Namespace, host) + sslVerify := fmt.Sprintf("ssl_verify_client %s;", verifyClient) + sslVerifyDepth := fmt.Sprintf("ssl_verify_depth %s;", verifyDepth) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, sslCertDirective) && + strings.Contains(server, sslKeyDirective) && + strings.Contains(server, sslClientCertDirective) && + strings.Contains(server, sslVerify) && + strings.Contains(server, sslVerifyDepth) + }) +} diff --git a/test/e2e/annotations/backendprotocol.go b/test/e2e/annotations/backendprotocol.go new file mode 100644 index 0000000000..24b57cd48c --- /dev/null +++ b/test/e2e/annotations/backendprotocol.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Backendprotocol", func() { + f := framework.NewDefaultFramework("backendprotocol") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should set backend protocol to https://", func() { + host := "backendprotocol.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_pass https://upstream_balancer;")) + }) + }) + + It("should set backend protocol to grpc://", func() { + host := "backendprotocol.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("grpc_pass grpc://upstream_balancer;")) + }) + }) + + It("should set backend protocol to grpcs://", func() { + host := "backendprotocol.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/backend-protocol": "GRPCS", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("grpc_pass grpcs://upstream_balancer;")) + }) + }) + + It("should set backend protocol to ''", func() { + host := "backendprotocol.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/backend-protocol": "AJP", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("ajp_pass upstream_balancer;")) + }) + }) +}) diff --git a/test/e2e/annotations/canary.go b/test/e2e/annotations/canary.go new file mode 100644 index 0000000000..7a9a237c94 --- /dev/null +++ b/test/e2e/annotations/canary.go @@ -0,0 +1,782 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +const ( + waitForLuaSync = 5 * time.Second +) + +var _ = framework.IngressNginxDescribe("Annotations - canary", func() { + f := framework.NewDefaultFramework("canary") + + BeforeEach(func() { + // Deployment for main backend + f.NewEchoDeployment() + + // Deployment for canary backend + f.NewDeployment("http-svc-canary", "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, 1) + }) + + Context("when canary is created", func() { + It("should response with a 200 status from the mainline upstream when requests are made to the mainline ingress", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + }) + + It("should return 404 status for requests to the canary if no matching ingress is found", func() { + host := "foo" + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + }) + + /* + + TODO: This test needs improvements made to the e2e framework so that deployment updates work in order to successfully run + + It("should return the correct status codes when endpoints are unavailable", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("returning a 503 status when the mainline deployment has 0 replicas and a request is sent to the canary") + + f.NewEchoDeploymentWithReplicas(0) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusServiceUnavailable)) + + By("returning a 200 status when the canary deployment has 0 replicas and a request is sent to the mainline ingress") + + f.NewEchoDeploymentWithReplicas(1) + f.NewDeployment("http-svc-canary", "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, 0) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + */ + + It("should route requests to the correct upstream if mainline ingress is created before the canary ingress", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests destined for the mainline ingress to the mainline upstream") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests destined for the canary ingress to the canary upstream") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). 
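+ // The reserved header value "always" unconditionally selects the canary upstream.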
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + + It("should route requests to the correct upstream if mainline ingress is created after the canary ingress", func() { + host := "foo" + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + By("routing requests destined for the mainline ingress to the mainline upstream") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests destined for the canary ingress to the canary upstream") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + + It("should route requests to the correct upstream if the mainline ingress is modified", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + modAnnotations := map[string]string{ + "foo": "bar", + } + + modIng := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &modAnnotations) + + f.EnsureIngress(modIng) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + By("routing requests destined for the mainline ingress to the mainline upstream") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests destined for the canary ingress to the canary upstream") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
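+ // The canary rule itself was not touched, so "always" must still select the canary upstream after the mainline ingress was modified.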
+ Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + + It("should route requests to the correct upstream if the canary ingress is modified", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + modCanaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader2", + } + + modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) + f.EnsureIngress(modCanaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests destined for the mainline ingress to the mainline upstream") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader2", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests destined for the canary ingress to the canary upstream") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader2", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + }) + + Context("when canaried by header with no value", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when header is set to 'always'") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to 'never'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). 
+ Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to anything else") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "badheadervalue"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + }) + }) + + Context("when canaried by header with value", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + "nginx.ingress.kubernetes.io/canary-by-header-value": "DoCanary", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when header is set to 'DoCanary'") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "DoCanary"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to 'always'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to 'never'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to anything else") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "otherheadervalue"). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + }) + }) + + Context("when canaried by header with value and cookie", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + "nginx.ingress.kubernetes.io/canary-by-header-value": "DoCanary", + "nginx.ingress.kubernetes.io/canary-by-cookie": "CanaryByCookie", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when header value does not match and cookie is set to 'always'") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "otherheadervalue"). + AddCookie(&http.Cookie{Name: "CanaryByCookie", Value: "always"}). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + }) + + Context("when canaried by cookie", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-cookie": "Canary-By-Cookie", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when cookie is set to 'always'") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "always"}). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when cookie is set to 'never'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "never"}). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when cookie is set to anything else") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
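+ // Only the exact cookie value "always" selects the canary; any other value is treated like "never".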
+ AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "badcookievalue"}). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + }) + }) + + // TODO: add testing for canary-weight 0 < weight < 100 + Context("when canaried by weight", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "0", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("returning requests from the mainline only when weight is equal to 0") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("returning requests from the canary only when weight is equal to 100") + + modCanaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "100", + } + + modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) + + f.EnsureIngress(modCanaryIng) + + time.Sleep(waitForLuaSync) + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
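+ // With canary-weight set to 100, every request should now be served by the canary deployment.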
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + + }) + }) + + Context("Single canary Ingress", func() { + It("should not use canary as a catch-all server", func() { + host := "foo" + canaryIngName := fmt.Sprintf("%v-canary", host) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + ing := framework.NewSingleCatchAllIngress(canaryIngName, f.Namespace, "http-svc-canary", 80, &annotations) + f.EnsureIngress(ing) + + ing = framework.NewSingleCatchAllIngress(host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer("_", + func(server string) bool { + upstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, "http-svc", "80") + canaryUpstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, "http-svc-canary", "80") + return Expect(server).Should(ContainSubstring(`set $ingress_name "`+host+`";`)) && + Expect(server).ShouldNot(ContainSubstring(`set $proxy_upstream_name "upstream-default-backend";`)) && + Expect(server).ShouldNot(ContainSubstring(canaryUpstreamName)) && + Expect(server).Should(ContainSubstring(upstreamName)) + }) + }) + + It("should not use canary with domain as a server", func() { + host := "foo" + canaryIngName := fmt.Sprintf("%v-canary", host) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + ing := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &annotations) + f.EnsureIngress(ing) + + otherHost := "bar" + ing = framework.NewSingleIngress(otherHost, "/", otherHost, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + time.Sleep(waitForLuaSync) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return Expect(cfg).Should(ContainSubstring("server_name "+otherHost)) && + Expect(cfg).ShouldNot(ContainSubstring("server_name "+host)) + }) + }) + }) + + It("does not crash when canary ingress has multiple paths to the same non-matching backend", func() { + host := "foo" + canaryIngName := fmt.Sprintf("%v-canary", host) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + } + + paths := []string{"/foo", "/bar"} + ing := framework.NewSingleIngressWithMultiplePaths(canaryIngName, paths, host, f.Namespace, "httpy-svc-canary", 80, &annotations) + f.EnsureIngress(ing) + + ing = framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + }) +}) diff --git a/test/e2e/annotations/clientbodybuffersize.go b/test/e2e/annotations/clientbodybuffersize.go new file mode 100644 index 0000000000..cfaa664e14 --- /dev/null +++ b/test/e2e/annotations/clientbodybuffersize.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", func() { + f := framework.NewDefaultFramework("clientbodybuffersize") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should set client_body_buffer_size to 1000", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1000", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_body_buffer_size 1000;")) + }) + }) + + It("should set client_body_buffer_size to 1K", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1K", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_body_buffer_size 1K;")) + }) + }) + + It("should set client_body_buffer_size to 1k", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1k", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_body_buffer_size 1k;")) + }) + }) + + It("should set client_body_buffer_size to 1m", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1m", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_body_buffer_size 1m;")) + }) + }) + + It("should set client_body_buffer_size to 1M", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1M", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_body_buffer_size 1M;")) + }) + }) + + It("should not set client_body_buffer_size to invalid 1b", func() { + host := "proxy.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/client-body-buffer-size": "1b", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return 
Expect(server).ShouldNot(ContainSubstring("client_body_buffer_size 1b;")) + }) + }) +}) diff --git a/test/e2e/annotations/connection.go b/test/e2e/annotations/connection.go new file mode 100644 index 0000000000..5becd22ac9 --- /dev/null +++ b/test/e2e/annotations/connection.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Connection", func() { + f := framework.NewDefaultFramework("connection") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("set connection header to keep-alive", func() { + host := "connection.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/connection-proxy-header": "keep-alive", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`proxy_set_header Connection keep-alive;`)) + }) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("connection=keep-alive"))) + }) +}) diff --git a/test/e2e/annotations/cors.go b/test/e2e/annotations/cors.go new file mode 100644 index 0000000000..ec1c5560e5 --- /dev/null +++ b/test/e2e/annotations/cors.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "net/http" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { + f := framework.NewDefaultFramework("cors") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should enable cors", func() { + host := "cors.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-cors": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Methods: GET, PUT, POST, DELETE, PATCH, OPTIONS';")) + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Origin: *';")) + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Headers: DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';")) + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Max-Age: 1728000';")) + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Credentials: true';")) + }) + + uri := "/" + resp, _, errs := gorequest.New(). + Options(f.GetURL(framework.HTTP)+uri). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusNoContent)) + }) + + It("should set cors methods to only allow POST, GET", func() { + host := "cors.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-cors": "true", + "nginx.ingress.kubernetes.io/cors-allow-methods": "POST, GET", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Methods: POST, GET';")) + }) + }) + + It("should set cors max-age", func() { + host := "cors.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-cors": "true", + "nginx.ingress.kubernetes.io/cors-max-age": "200", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Max-Age: 200';")) + }) + }) + + It("should disable cors allow credentials", func() { + host := "cors.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-cors": "true", + "nginx.ingress.kubernetes.io/cors-allow-credentials": "false", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).ShouldNot(ContainSubstring("more_set_headers 'Access-Control-Allow-Credentials: true';")) + }) + }) + + It("should allow origin for cors", func() { + host := "cors.foo.com" + annotations := map[string]string{ + 
"nginx.ingress.kubernetes.io/enable-cors": "true", + "nginx.ingress.kubernetes.io/cors-allow-origin": "https://origin.cors.com:8080", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Origin: https://origin.cors.com:8080';")) + }) + }) + + It("should allow headers for cors", func() { + host := "cors.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-cors": "true", + "nginx.ingress.kubernetes.io/cors-allow-headers": "DNT, User-Agent", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("more_set_headers 'Access-Control-Allow-Headers: DNT, User-Agent';")) + }) + }) +}) diff --git a/test/e2e/annotations/customhttperrors.go b/test/e2e/annotations/customhttperrors.go new file mode 100644 index 0000000000..0a623fe923 --- /dev/null +++ b/test/e2e/annotations/customhttperrors.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +func errorBlockName(upstreamName string, errorCode string) string { + return fmt.Sprintf("@custom_%s_%s", upstreamName, errorCode) +} + +var _ = framework.IngressNginxDescribe("Annotations - custom-http-errors", func() { + f := framework.NewDefaultFramework("custom-http-errors") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("configures Nginx correctly", func() { + host := "customerrors.foo.com" + + errorCodes := []string{"404", "500"} + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/custom-http-errors": strings.Join(errorCodes, ","), + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + var serverConfig string + f.WaitForNginxServer(host, func(sc string) bool { + serverConfig = sc + return strings.Contains(serverConfig, fmt.Sprintf("server_name %s", host)) + }) + + By("turning on proxy_intercept_errors directive") + Expect(serverConfig).Should(ContainSubstring("proxy_intercept_errors on;")) + + By("configuring error_page directive") + for _, code := range errorCodes { + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("error_page %s = %s", code, errorBlockName("upstream-default-backend", code)))) + } + + By("creating error locations") + for _, code := range errorCodes { + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", code)))) + } + + By("updating configuration when only custom-http-error value changes") + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/custom-http-errors"] = "503" + return nil + }) + Expect(err).ToNot(HaveOccurred()) + f.WaitForNginxServer(host, func(sc string) bool { + if serverConfig != sc { + serverConfig = sc + return true + } + return false + }) + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "503")))) + Expect(serverConfig).ShouldNot(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "404")))) + Expect(serverConfig).ShouldNot(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "500")))) + + By("ignoring duplicate values (503 in this case) per server") + annotations["nginx.ingress.kubernetes.io/custom-http-errors"] = "404, 503" + ing = framework.NewSingleIngress(fmt.Sprintf("%s-else", host), "/else", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + f.WaitForNginxServer(host, func(sc string) bool { + serverConfig = sc + return strings.Contains(serverConfig, "location /else") + }) + count := strings.Count(serverConfig, fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "503"))) + Expect(count).Should(Equal(1)) + + By("using the custom default-backend from annotation for upstream") + customDefaultBackend := "from-annotation" + f.NewEchoDeploymentWithNameAndReplicas(customDefaultBackend, 1) + + err = framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/default-backend"] = customDefaultBackend + return nil + }) + Expect(err).ToNot(HaveOccurred()) + f.WaitForNginxServer(host, func(sc string) bool { + if 
serverConfig != sc { + serverConfig = sc + return true + } + return false + }) + Expect(serverConfig).Should(ContainSubstring(errorBlockName(fmt.Sprintf("custom-default-backend-%s", customDefaultBackend), "503"))) + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("error_page %s = %s", "503", errorBlockName(fmt.Sprintf("custom-default-backend-%s", customDefaultBackend), "503")))) + }) +}) diff --git a/test/e2e/annotations/default_backend.go b/test/e2e/annotations/default_backend.go new file mode 100644 index 0000000000..4aff300f71 --- /dev/null +++ b/test/e2e/annotations/default_backend.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - custom default-backend", func() { + f := framework.NewDefaultFramework("default-backend") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + Context("when default backend annotation is enabled", func() { + It("should use a custom default backend as upstream", func() { + host := "default-backend" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/default-backend": "http-svc", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "invalid", 80, &annotations) + f.EnsureIngress(ing) + + time.Sleep(5 * time.Second) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(fmt.Sprintf("server_name %v", host))) + }) + + uri := "/alma/armud" + requestId := "something-unique" + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+uri). + Set("Host", host). + Set("x-request-id", requestId). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + Expect(body).To(ContainSubstring("x-code=503")) + Expect(body).To(ContainSubstring(fmt.Sprintf("x-ingress-name=%s", host))) + Expect(body).To(ContainSubstring("x-service-name=invalid")) + Expect(body).To(ContainSubstring(fmt.Sprintf("x-request-id=%s", requestId))) + }) + }) +}) diff --git a/test/e2e/annotations/forcesslredirect.go b/test/e2e/annotations/forcesslredirect.go new file mode 100644 index 0000000000..118d07f22c --- /dev/null +++ b/test/e2e/annotations/forcesslredirect.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Forcesslredirect", func() { + f := framework.NewDefaultFramework("forcesslredirect") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should redirect to https", func() { + host := "forcesslredirect.bar.com" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect)) + Expect(resp.Header.Get("Location")).Should(Equal("https://forcesslredirect.bar.com/")) + }) +}) diff --git a/test/e2e/annotations/fromtowwwredirect.go b/test/e2e/annotations/fromtowwwredirect.go new file mode 100644 index 0000000000..e9799968b7 --- /dev/null +++ b/test/e2e/annotations/fromtowwwredirect.go @@ -0,0 +1,132 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "crypto/tls" + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", func() { + f := framework.NewDefaultFramework("fromtowwwredirect") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("should redirect from www HTTP to HTTP", func() { + By("setting up server for redirect from www") + host := "fromtowwwredirect.bar.com" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/from-to-www-redirect": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return Expect(cfg).Should(ContainSubstring(`server_name www.fromtowwwredirect.bar.com;`)) && + Expect(cfg).Should(ContainSubstring(`return 308 $scheme://fromtowwwredirect.bar.com$request_uri;`)) + }) + + By("sending request to www.fromtowwwredirect.bar.com") + + resp, _, errs := gorequest.New(). + Get(fmt.Sprintf("%s/%s", f.GetURL(framework.HTTP), "foo")). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("Host", fmt.Sprintf("%s.%s", "www", host)). 
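+ // Request the www variant without following redirects; the 308 back to the bare domain is asserted just below.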
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect)) + Expect(resp.Header.Get("Location")).Should(Equal("http://fromtowwwredirect.bar.com/foo")) + }) + + It("should redirect from www HTTPS to HTTPS", func() { + By("setting up server for redirect from www") + + fromHost := fmt.Sprintf("%s.nip.io", f.GetNginxIP()) + toHost := fmt.Sprintf("www.%s", fromHost) + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/from-to-www-redirect": "true", + "nginx.ingress.kubernetes.io/configuration-snippet": "more_set_headers \"ExpectedHost: $http_host\";", + } + + ing := framework.NewSingleIngressWithTLS(fromHost, "/", fromHost, []string{fromHost, toHost}, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).ToNot(HaveOccurred()) + + f.WaitForNginxServer(toHost, + func(server string) bool { + return Expect(server).Should(ContainSubstring(fmt.Sprintf(`server_name %v;`, toHost))) && + Expect(server).Should(ContainSubstring(fmt.Sprintf(`return 308 $scheme://%v$request_uri;`, fromHost))) + }) + + By("sending request to www should redirect to domain without www") + + resp, _, errs := gorequest.New(). + TLSClientConfig(&tls.Config{ + InsecureSkipVerify: true, + ServerName: toHost, + }). + Get(f.GetURL(framework.HTTPS)). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("host", toHost). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect)) + Expect(resp.Header.Get("Location")).Should(Equal(fmt.Sprintf("https://%v/", fromHost))) + + By("sending request to domain should redirect to domain with www") + + req := gorequest.New(). + TLSClientConfig(&tls.Config{ + InsecureSkipVerify: true, + ServerName: toHost, + }). + Get(f.GetURL(framework.HTTPS)). + Set("host", toHost) + + resp, _, errs = req.End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("ExpectedHost")).Should(Equal(fromHost)) + }) +}) diff --git a/test/e2e/annotations/grpc.go b/test/e2e/annotations/grpc.go new file mode 100644 index 0000000000..bb3c894afa --- /dev/null +++ b/test/e2e/annotations/grpc.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - grpc", func() { + f := framework.NewDefaultFramework("grpc") + + BeforeEach(func() { + f.NewGRPCFortuneTellerDeployment() + }) + + Context("when grpc is enabled", func() { + It("should use grpc_pass in the configuration file", func() { + host := "grpc" + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "fortune-teller", 50051, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(fmt.Sprintf("server_name %v", host))) + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("grpc_pass")) && + Expect(server).Should(ContainSubstring("grpc_set_header")) && + Expect(server).ShouldNot(ContainSubstring("proxy_pass")) + }) + }) + }) +}) diff --git a/test/e2e/annotations/http2pushpreload.go b/test/e2e/annotations/http2pushpreload.go new file mode 100644 index 0000000000..5ba7212b1e --- /dev/null +++ b/test/e2e/annotations/http2pushpreload.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - HTTP2 Push Preload", func() { + f := framework.NewDefaultFramework("http2pushpreload") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("enable the http2-push-preload directive", func() { + host := "http2pp.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/http2-push-preload": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("http2_push_preload on;")) + }) + }) +}) diff --git a/test/e2e/annotations/influxdb.go b/test/e2e/annotations/influxdb.go new file mode 100644 index 0000000000..23917b6b8c --- /dev/null +++ b/test/e2e/annotations/influxdb.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package annotations + +import ( + "bytes" + "fmt" + "net/http" + "os/exec" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + jsoniter "github.com/json-iterator/go" + "github.com/parnurzeal/gorequest" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - influxdb", func() { + f := framework.NewDefaultFramework("influxdb") + + BeforeEach(func() { + f.NewInfluxDBDeployment() + f.NewEchoDeployment() + }) + + Context("when influxdb is enabled", func() { + It("should send the request metric to the influxdb server", func() { + ifs := createInfluxDBService(f) + + // Ingress configured with InfluxDB annotations + host := "influxdb.e2e.local" + createInfluxDBIngress( + f, + host, + "http-svc", + 8080, + map[string]string{ + "nginx.ingress.kubernetes.io/enable-influxdb": "true", + "nginx.ingress.kubernetes.io/influxdb-host": ifs.Spec.ClusterIP, + "nginx.ingress.kubernetes.io/influxdb-port": "8089", + "nginx.ingress.kubernetes.io/influxdb-measurement": "requests", + "nginx.ingress.kubernetes.io/influxdb-servername": "e2e-nginx-srv", + }, + ) + + // Do a request to the echo server ingress that sends metrics + // to the InfluxDB backend. + res, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(res.StatusCode).Should(Equal(http.StatusOK)) + + time.Sleep(5 * time.Second) + + var measurements string + var err error + + err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { + measurements, err = extractInfluxDBMeasurements(f) + if err != nil { + return false, nil + } + return true, nil + }) + Expect(err).NotTo(HaveOccurred()) + + var results map[string][]map[string]interface{} + jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal([]byte(measurements), &results) + + Expect(len(measurements)).ShouldNot(Equal(0)) + for _, elem := range results["results"] { + Expect(len(elem)).ShouldNot(Equal(0)) + } + }) + }) +}) + +func createInfluxDBService(f *framework.Framework) *corev1.Service { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "influxdb-svc", + Namespace: f.Namespace, + }, + Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{ + { + Name: "udp", + Port: 8089, + TargetPort: intstr.FromInt(8089), + Protocol: "UDP", + }, + }, + Selector: map[string]string{ + "app": "influxdb-svc", + }, + }, + } + + return f.EnsureService(service) +} + +func createInfluxDBIngress(f *framework.Framework, host, service string, port int, annotations map[string]string) { + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, service, port, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(fmt.Sprintf("server_name %v", host))) + }) +} + +func extractInfluxDBMeasurements(f *framework.Framework) (string, error) { + l, err := f.KubeClientSet.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{ + LabelSelector: "app=influxdb-svc", + }) + if err != nil { + return "", err + } + + if len(l.Items) == 0 { + // returning a nil error here would make the polling caller treat an empty result as success + return "", fmt.Errorf("no influxdb pods found") + } + + cmd := "influx -database 'nginx' -execute 'select * from requests' -format 'json' -pretty" + + var pod *corev1.Pod + for _, p := range l.Items { + pod = &p + break + } + + if pod == nil { + return "", fmt.Errorf("no 
influxdb pods found") + } + + o, err := execInfluxDBCommand(pod, cmd) + if err != nil { + return "", err + } + + return o, nil +} + +func execInfluxDBCommand(pod *corev1.Pod, command string) (string, error) { + var ( + execOut bytes.Buffer + execErr bytes.Buffer + ) + + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v exec --namespace %s %s -- %s", framework.KubectlPath, pod.Namespace, pod.Name, command)) + cmd.Stdout = &execOut + cmd.Stderr = &execErr + + err := cmd.Run() + + if execErr.Len() > 0 { + return "", fmt.Errorf("stderr: %v", execErr.String()) + } + + if err != nil { + return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err) + } + + return execOut.String(), nil +} diff --git a/test/e2e/annotations/ipwhitelist.go b/test/e2e/annotations/ipwhitelist.go new file mode 100644 index 0000000000..74fd12d0fd --- /dev/null +++ b/test/e2e/annotations/ipwhitelist.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "strings" + + . "github.com/onsi/ginkgo" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - IPWhiteList", func() { + f := framework.NewDefaultFramework("ipwhitelist") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should set valid ip whitelist range", func() { + host := "ipwhitelist.foo.com" + nameSpace := f.Namespace + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/whitelist-source-range": "18.0.0.0/8, 56.0.0.0/8", + } + + ing := framework.NewSingleIngress(host, "/", host, nameSpace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "allow 18.0.0.0/8;") && + strings.Contains(server, "allow 56.0.0.0/8;") && + strings.Contains(server, "deny all;") + }) + }) +}) diff --git a/test/e2e/annotations/log.go b/test/e2e/annotations/log.go new file mode 100644 index 0000000000..068f953bd6 --- /dev/null +++ b/test/e2e/annotations/log.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Log", func() { + f := framework.NewDefaultFramework("log") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("set access_log off", func() { + host := "log.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-access-log": "false", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`access_log off;`)) + }) + }) + + It("set rewrite_log on", func() { + host := "log.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-rewrite-log": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`rewrite_log on;`)) + }) + }) +}) diff --git a/test/e2e/annotations/luarestywaf.go b/test/e2e/annotations/luarestywaf.go new file mode 100644 index 0000000000..aaeda0c59f --- /dev/null +++ b/test/e2e/annotations/luarestywaf.go @@ -0,0 +1,224 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { + f := framework.NewDefaultFramework("luarestywaf") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + Context("when lua-resty-waf is enabled", func() { + It("should return 403 for a malicious request that matches a default WAF rule and 200 for other requests", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{"nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) + + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + }) + It("should not apply ignored rulesets", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf": "active", + "nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets": "41000_sqli, 42000_xss"}) + + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). 
+ End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + It("should apply the score threshold", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf": "active", + "nginx.ingress.kubernetes.io/lua-resty-waf-score-threshold": "20"}) + + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + It("should not reject request with an unknown content type", func() { + host := "foo" + contenttype := "application/octet-stream" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf-allow-unknown-content-types": "true", + "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) + + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + Set("Content-Type", contenttype). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + It("should not fail a request with multipart content type when multipart body processing disabled", func() { + contenttype := "multipart/form-data; boundary=alamofire.boundary.3fc2e849279e18fc" + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf-process-multipart-body": "false", + "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) + + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + Set("Content-Type", contenttype). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + It("should fail a request with multipart content type when multipart body processing enabled by default", func() { + contenttype := "multipart/form-data; boundary=alamofire.boundary.3fc2e849279e18fc" + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) + + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + Set("Content-Type", contenttype). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusBadRequest)) + }) + It("should apply configured extra rules", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{ + "nginx.ingress.kubernetes.io/lua-resty-waf": "active", + "nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules": `[=[ + { "access": [ + { "actions": { "disrupt" : "DENY" }, + "id": 10001, + "msg": "my custom rule", + "operator": "STR_CONTAINS", + "pattern": "foo", + "vars": [ { "parse": [ "values", 1 ], "type": "REQUEST_ARGS" } ] } + ], + "body_filter": [], + "header_filter":[] + } + ]=]`, + }) + + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + url = fmt.Sprintf("%s?msg=my-foo-message", f.GetURL(framework.HTTP)) + resp, _, errs = gorequest.New(). + Get(url). + Set("Host", host). 
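+ // "my-foo-message" contains "foo", the STR_CONTAINS pattern of the custom rule above, so this request should be denied.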
+ End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + }) + }) + Context("when lua-resty-waf is not enabled", func() { + It("should return 200 even for a malicious request", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{}) + + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + It("should run in simulate mode", func() { + host := "foo" + createIngress(f, host, "http-svc", 80, map[string]string{"nginx.ingress.kubernetes.io/lua-resty-waf": "simulate"}) + + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + time.Sleep(5 * time.Second) + log, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(log).To(ContainSubstring("Request score greater than score threshold")) + }) + }) +}) + +func createIngress(f *framework.Framework, host, service string, port int, annotations map[string]string) { + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, service, port, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(fmt.Sprintf("server_name %v", host))) + }) + + time.Sleep(1 * time.Second) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) +} diff --git a/test/e2e/annotations/modsecurity.go b/test/e2e/annotations/modsecurity.go new file mode 100644 index 0000000000..60343f644a --- /dev/null +++ b/test/e2e/annotations/modsecurity.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "strings" + + . 
"github.com/onsi/ginkgo" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - ModSecurityLocation", func() { + f := framework.NewDefaultFramework("modsecuritylocation") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should enable modsecurity", func() { + host := "modsecurity.foo.com" + nameSpace := f.Namespace + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-modsecurity": "true", + } + + ing := framework.NewSingleIngress(host, "/", host, nameSpace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "modsecurity on;") && + strings.Contains(server, "modsecurity_rules_file /etc/nginx/modsecurity/modsecurity.conf;") + }) + }) + + It("should enable modsecurity with transaction ID and OWASP rules", func() { + host := "modsecurity.foo.com" + nameSpace := f.Namespace + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-modsecurity": "true", + "nginx.ingress.kubernetes.io/enable-owasp-core-rules": "true", + "nginx.ingress.kubernetes.io/modsecurity-transaction-id": "modsecurity-$request_id", + } + + ing := framework.NewSingleIngress(host, "/", host, nameSpace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "modsecurity on;") && + strings.Contains(server, "modsecurity_rules_file /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf;") && + strings.Contains(server, "modsecurity_transaction_id \"modsecurity-$request_id\";") + }) + }) + + It("should disable modsecurity", func() { + host := "modsecurity.foo.com" + nameSpace := f.Namespace + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-modsecurity": "false", + } + + ing := framework.NewSingleIngress(host, "/", host, nameSpace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return !strings.Contains(server, "modsecurity on;") + }) + }) + + It("should enable modsecurity with snippet", func() { + host := "modsecurity.foo.com" + nameSpace := f.Namespace + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/enable-modsecurity": "true", + "nginx.ingress.kubernetes.io/modsecurity-snippet": "SecRuleEngine On", + } + + ing := framework.NewSingleIngress(host, "/", host, nameSpace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "modsecurity on;") && + strings.Contains(server, "SecRuleEngine On") + }) + }) +}) diff --git a/test/e2e/annotations/proxy.go b/test/e2e/annotations/proxy.go new file mode 100644 index 0000000000..8a12d63380 --- /dev/null +++ b/test/e2e/annotations/proxy.go @@ -0,0 +1,233 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package annotations + +import ( + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { + f := framework.NewDefaultFramework("proxy") + host := "proxy.foo.com" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should set proxy_redirect to off", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-redirect-from": "off", + "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_redirect off;")) + }) + }) + + It("should set proxy_redirect to default", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-redirect-from": "default", + "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_redirect default;")) + }) + }) + + It("should set proxy_redirect to hello.com goodbye.com", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-redirect-from": "hello.com", + "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_redirect hello.com goodbye.com;")) + }) + }) + + It("should set proxy client-max-body-size to 8m", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-body-size": "8m", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("client_max_body_size 8m;")) + }) + }) + + It("should not set proxy client-max-body-size to incorrect value", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-body-size": "15r", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).ShouldNot(ContainSubstring("client_max_body_size 15r;")) + }) + }) + + It("should set valid proxy timeouts", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-connect-timeout": "50", + "nginx.ingress.kubernetes.io/proxy-send-timeout": "20", + "nginx.ingress.kubernetes.io/proxy-read-timeout": "20", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_connect_timeout 50s;") && + strings.Contains(server, "proxy_send_timeout 20s;") && + strings.Contains(server, "proxy_read_timeout 20s;") + }) + }) + + It("should not set invalid proxy timeouts", func() { + annotations := map[string]string{ + 
"nginx.ingress.kubernetes.io/proxy-connect-timeout": "50k", + "nginx.ingress.kubernetes.io/proxy-send-timeout": "20k", + "nginx.ingress.kubernetes.io/proxy-read-timeout": "20k", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return !strings.Contains(server, "proxy_connect_timeout 50ks;") && + !strings.Contains(server, "proxy_send_timeout 20ks;") && + !strings.Contains(server, "proxy_read_timeout 20ks;") + }) + }) + + It("should turn on proxy-buffering", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-buffering": "on", + "nginx.ingress.kubernetes.io/proxy-buffers-number": "8", + "nginx.ingress.kubernetes.io/proxy-buffer-size": "8k", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_buffering on;") && + strings.Contains(server, "proxy_buffer_size 8k;") && + strings.Contains(server, "proxy_buffers 8 8k;") && + strings.Contains(server, "proxy_request_buffering on;") + }) + }) + + It("should turn off proxy-request-buffering", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-request-buffering": "off", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_request_buffering off;")) + }) + }) + + It("should build proxy next upstream", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-next-upstream": "error timeout http_502", + "nginx.ingress.kubernetes.io/proxy-next-upstream-timeout": "10", + "nginx.ingress.kubernetes.io/proxy-next-upstream-tries": "5", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_next_upstream error timeout http_502;") && + strings.Contains(server, "proxy_next_upstream_timeout 10;") && + strings.Contains(server, "proxy_next_upstream_tries 5;") + }) + }) + + It("should build proxy next upstream using configmap values", func() { + annotations := map[string]string{} + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.SetNginxConfigMapData(map[string]string{ + "proxy-next-upstream": "timeout http_502", + "proxy-next-upstream-timeout": "53", + "proxy-next-upstream-tries": "44", + }) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_next_upstream timeout http_502;") && + strings.Contains(server, "proxy_next_upstream_timeout 53;") && + strings.Contains(server, "proxy_next_upstream_tries 44;") + }) + }) + + It("should setup proxy cookies", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-cookie-domain": "localhost example.org", + "nginx.ingress.kubernetes.io/proxy-cookie-path": "/one/ /", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_cookie_domain localhost example.org;") && + 
strings.Contains(server, "proxy_cookie_path /one/ /;") + }) + }) +}) diff --git a/test/e2e/annotations/redirect.go b/test/e2e/annotations/redirect.go new file mode 100644 index 0000000000..7e6171ba8e --- /dev/null +++ b/test/e2e/annotations/redirect.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "strconv" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +func noRedirectPolicyFunc(gorequest.Request, []gorequest.Request) error { + return http.ErrUseLastResponse +} + +var _ = framework.IngressNginxDescribe("Annotations - Redirect", func() { + f := framework.NewDefaultFramework("redirect") + + BeforeEach(func() { + }) + + AfterEach(func() { + }) + + It("should respond with a standard redirect code", func() { + By("setting permanent-redirect annotation") + + host := "redirect" + redirectPath := "/something" + redirectURL := "http://redirect.example.com" + + annotations := map[string]string{"nginx.ingress.kubernetes.io/permanent-redirect": redirectURL} + + ing := framework.NewSingleIngress(host, redirectPath, host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("if ($uri ~* %s) {", redirectPath)) && + strings.Contains(server, fmt.Sprintf("return 301 %s;", redirectURL)) + }) + + By("sending request to redirected URL path") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+redirectPath). + Set("Host", host). + RedirectPolicy(noRedirectPolicyFunc). + End() + + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(BeNumerically("==", http.StatusMovedPermanently)) + Expect(resp.Header.Get("Location")).Should(Equal(redirectURL)) + Expect(body).Should(ContainSubstring("nginx/")) + }) + + It("should respond with a custom redirect code", func() { + By("setting permanent-redirect-code annotation") + + host := "redirect" + redirectPath := "/something" + redirectURL := "http://redirect.example.com" + redirectCode := http.StatusFound + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/permanent-redirect": redirectURL, + "nginx.ingress.kubernetes.io/permanent-redirect-code": strconv.Itoa(redirectCode), + } + + ing := framework.NewSingleIngress(host, redirectPath, host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("if ($uri ~* %s) {", redirectPath)) && + strings.Contains(server, fmt.Sprintf("return %d %s;", redirectCode, redirectURL)) + }) + + By("sending request to redirected URL path") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+redirectPath). + Set("Host", host). + RedirectPolicy(noRedirectPolicyFunc). 
+ End() + + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(BeNumerically("==", redirectCode)) + Expect(resp.Header.Get("Location")).Should(Equal(redirectURL)) + Expect(body).Should(ContainSubstring("nginx/")) + }) +}) diff --git a/test/e2e/annotations/rewrite.go b/test/e2e/annotations/rewrite.go new file mode 100644 index 0000000000..c5137a34c9 --- /dev/null +++ b/test/e2e/annotations/rewrite.go @@ -0,0 +1,221 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "net/http" + "strings" + + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { + f := framework.NewDefaultFramework("rewrite") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should write rewrite logs", func() { + By("setting enable-rewrite-log annotation") + + host := "rewrite.bar.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/rewrite-target": "/", + "nginx.ingress.kubernetes.io/enable-rewrite-log": "true", + } + + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "rewrite_log on;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/something"). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + logs, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(logs).To(ContainSubstring(`"(?i)/something" matches "/something", client:`)) + Expect(logs).To(ContainSubstring(`rewritten data: "/", args: "",`)) + }) + + It("should use correct longest path match", func() { + host := "rewrite.bar.com" + + By("creating a regular ingress definition") + ing := framework.NewSingleIngress("kube-lego", "/.well-known/acme/challenge", host, f.Namespace, "http-svc", 80, &map[string]string{}) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "/.well-known/acme/challenge") + }) + + By("making a request to the non-rewritten location") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/.well-known/acme/challenge"). + Set("Host", host). 
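+ // No rewrite applies to this location yet, so the echoed request_uri should keep the original /.well-known path.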
+ End() + expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/.well-known/acme/challenge", host) + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + + By(`creating an ingress definition with the rewrite-target annotation set on the "/" location`) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", + } + rewriteIng := framework.NewSingleIngress("rewrite-index", "/", host, f.Namespace, "http-svc", 80, &annotations) + + f.EnsureIngress(rewriteIng) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `location ~* "^/" {`) && strings.Contains(server, `location ~* "^/.well-known/acme/challenge" {`) + }) + + By("making a second request to the non-rewritten location") + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/.well-known/acme/challenge"). + Set("Host", host). + End() + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + }) + + It("should use ~* location modifier if regex annotation is present", func() { + host := "rewrite.bar.com" + + By("creating a regular ingress definition") + ing := framework.NewSingleIngress("foo", "/foo", host, f.Namespace, "http-svc", 80, &map[string]string{}) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "location /foo {") + }) + + By(`creating an ingress definition with the use-regex and rewrite-target annotations`) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/use-regex": "true", + "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", + } + ing = framework.NewSingleIngress("regex", "/foo.+", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `location ~* "^/foo" {`) && strings.Contains(server, `location ~* "^/foo.+" {`) + }) + + By("ensuring '/foo' matches '~* ^/foo'") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo"). + Set("Host", host). + End() + expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/foo", host) + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + + By("ensuring '/foo/bar' matches '~* ^/foo.+'") + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). + Set("Host", host). 
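+ // "/foo/bar" is captured by the longer "^/foo.+" location, so the rewrite to /new/backend should apply.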
+ End() + expectBodyRequestURI = fmt.Sprintf("request_uri=http://%v:8080/new/backend", host) + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + }) + + It("should fail to use longest match for documented warning", func() { + host := "rewrite.bar.com" + + By("creating a regular ingress definition") + ing := framework.NewSingleIngress("foo", "/foo/bar/bar", host, f.Namespace, "http-svc", 80, &map[string]string{}) + f.EnsureIngress(ing) + + By(`creating an ingress definition with the use-regex annotation`) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/use-regex": "true", + "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", + } + ing = framework.NewSingleIngress("regex", "/foo/bar/[a-z]{3}", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `location ~* "^/foo/bar/bar" {`) && strings.Contains(server, `location ~* "^/foo/bar/[a-z]{3}" {`) + }) + + By("checking that '/foo/bar/bar' does not match the longest exact path") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar/bar"). + Set("Host", host). + End() + expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/new/backend", host) + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + }) + + It("should allow for custom rewrite parameters", func() { + host := "rewrite.bar.com" + + By(`creating an ingress definition with the use-regex annotation`) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/use-regex": "true", + "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend/$1", + } + ing := framework.NewSingleIngress("regex", "/foo/bar/(.+)", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `location ~* "^/foo/bar/(.+)" {`) + }) + + By("checking that '/foo/bar/bar' redirects to the custom rewrite") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/foo/bar/bar"). + Set("Host", host). + End() + expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/new/backend/bar", host) + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(expectBodyRequestURI)) + }) + +}) diff --git a/test/e2e/annotations/satisfy.go b/test/e2e/annotations/satisfy.go new file mode 100644 index 0000000000..5fbfde31d2 --- /dev/null +++ b/test/e2e/annotations/satisfy.go @@ -0,0 +1,149 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "net/url" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - SATISFY", func() { + f := framework.NewDefaultFramework("satisfy") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should configure satisfy directive correctly", func() { + host := "satisfy" + annotationKey := "nginx.ingress.kubernetes.io/satisfy" + + annotations := map[string]string{ + "any": "any", + "all": "all", + } + + results := map[string]string{ + "any": "satisfy any", + "all": "satisfy all", + } + + initAnnotations := map[string]string{ + annotationKey: "all", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &initAnnotations) + f.EnsureIngress(ing) + + for key, result := range results { + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations[annotationKey] = annotations[key] + return nil + }) + Expect(err).ToNot(HaveOccurred()) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(result)) + }) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) + } + }) + + It("should allow multiple auth with satisfy any", func() { + host := "auth" + + // setup external auth + f.NewHttpbinDeployment() + + err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, "httpbin", f.Namespace, 1) + Expect(err).NotTo(HaveOccurred()) + + e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get("httpbin", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + httpbinIP := e.Subsets[0].Addresses[0].IP + + // create basic auth secret at ingress + s := f.EnsureSecret(buildSecret("uname", "pwd", "basic-secret", f.Namespace)) + + annotations := map[string]string{ + // annotations for basic auth at ingress + "nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": s.Name, + "nginx.ingress.kubernetes.io/auth-realm": "test basic auth", + + // annotations for external auth + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbinIP), + "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", + + // set satisfy any + "nginx.ingress.kubernetes.io/satisfy": "any", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name auth")) + }) + + // with basic auth cred + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). + SetBasicAuth("uname", "pwd").End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + // reroute to signin if without basic cred + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). 
+ RedirectPolicy(func(req gorequest.Request, via []gorequest.Request) error { + return http.ErrUseLastResponse + }).Param("a", "b").Param("c", "d"). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusFound)) + Expect(resp.Header.Get("Location")).Should(Equal(fmt.Sprintf("http://%s/auth/start?rd=http://%s%s", host, host, url.QueryEscape("/?a=b&c=d")))) + }) +}) diff --git a/test/e2e/annotations/serversnippet.go b/test/e2e/annotations/serversnippet.go new file mode 100644 index 0000000000..6995d6c227 --- /dev/null +++ b/test/e2e/annotations/serversnippet.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "strings" + + . "github.com/onsi/ginkgo" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - ServerSnippet", func() { + f := framework.NewDefaultFramework("serversnippet") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It(`add valid directives to server via server snippet`, func() { + host := "serversnippet.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/server-snippet": ` + more_set_headers "Content-Length: $content_length"; + more_set_headers "Content-Type: $content_type";`, + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, `more_set_headers "Content-Length: $content_length`) && strings.Contains(server, `more_set_headers "Content-Type: $content_type";`) + }) + }) +}) diff --git a/test/e2e/annotations/snippet.go b/test/e2e/annotations/snippet.go new file mode 100644 index 0000000000..13da543ae4 --- /dev/null +++ b/test/e2e/annotations/snippet.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Configurationsnippet", func() { + f := framework.NewDefaultFramework("configurationsnippet") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It(`set snippet "more_set_headers "Request-Id: $req_id";" in all locations"`, func() { + host := "configurationsnippet.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/configuration-snippet": ` + more_set_headers "Request-Id: $req_id";`, + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`more_set_headers "Request-Id: $req_id";`)) + }) + }) +}) diff --git a/test/e2e/annotations/sslciphers.go b/test/e2e/annotations/sslciphers.go new file mode 100644 index 0000000000..5ea01f57ba --- /dev/null +++ b/test/e2e/annotations/sslciphers.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - SSL CIPHERS", func() { + f := framework.NewDefaultFramework("sslciphers") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("should change ssl ciphers", func() { + host := "ciphers.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-ciphers": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", + } + + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("ssl_ciphers ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;")) + }) + }) +}) diff --git a/test/e2e/annotations/upstreamhashby.go b/test/e2e/annotations/upstreamhashby.go new file mode 100644 index 0000000000..59c2c765e2 --- /dev/null +++ b/test/e2e/annotations/upstreamhashby.go @@ -0,0 +1,106 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "regexp" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +func startIngress(f *framework.Framework, annotations *map[string]string) map[string]bool { + host := "upstream-hash-by.foo.com" + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, annotations) + f.EnsureIngress(ing) + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + + err := wait.PollImmediate(framework.Poll, framework.DefaultTimeout, func() (bool, error) { + resp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + if resp.StatusCode == http.StatusOK { + return true, nil + } + return false, nil + }) + Expect(err).Should(BeNil()) + + re, _ := regexp.Compile(`Hostname: http-svc.*`) + podMap := map[string]bool{} + + for i := 0; i < 100; i++ { + _, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + + podName := re.FindString(body) + Expect(podName).ShouldNot(Equal("")) + podMap[podName] = true + + } + + return podMap +} + +var _ = framework.IngressNginxDescribe("Annotations - UpstreamHashBy", func() { + f := framework.NewDefaultFramework("upstream-hash-by") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(6) + }) + + AfterEach(func() { + }) + + It("should connect to the same pod", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-hash-by": "$request_uri", + } + + podMap := startIngress(f, &annotations) + Expect(len(podMap)).Should(Equal(1)) + + }) + + It("should connect to the same subset of pods", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-hash-by": "$request_uri", + "nginx.ingress.kubernetes.io/upstream-hash-by-subset": "true", + "nginx.ingress.kubernetes.io/upstream-hash-by-subset-size": "3", + } + + podMap := startIngress(f, &annotations) + Expect(len(podMap)).Should(Equal(3)) + + }) +}) diff --git a/test/e2e/annotations/upstreamvhost.go b/test/e2e/annotations/upstreamvhost.go new file mode 100644 index 0000000000..594eca9d1d --- /dev/null +++ b/test/e2e/annotations/upstreamvhost.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - Upstreamvhost", func() { + f := framework.NewDefaultFramework("upstreamvhost") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(2) + }) + + AfterEach(func() { + }) + + It("set host to upstreamvhost.bar.com", func() { + host := "upstreamvhost.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": "upstreamvhost.bar.com", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`proxy_set_header Host "upstreamvhost.bar.com";`)) + }) + }) +}) diff --git a/test/e2e/annotations/xforwardedprefix.go b/test/e2e/annotations/xforwardedprefix.go new file mode 100644 index 0000000000..479aa4de08 --- /dev/null +++ b/test/e2e/annotations/xforwardedprefix.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" + "net/http" +) + +var _ = framework.IngressNginxDescribe("Annotations - X-Forwarded-Prefix", func() { + f := framework.NewDefaultFramework("xforwardedprefix") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should set the X-Forwarded-Prefix to the annotation value", func() { + host := "xfp.baz.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/x-forwarded-prefix": "/test/value", + "nginx.ingress.kubernetes.io/rewrite-target": "/foo", + } + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_set_header X-Forwarded-Prefix \"/test/value\";")) + }) + + uri := "/" + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+uri). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).To(ContainSubstring("x-forwarded-prefix=/test/value")) + }) + + It("should not add X-Forwarded-Prefix if the annotation value is empty", func() { + host := "noxfp.baz.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/x-forwarded-prefix": "", + "nginx.ingress.kubernetes.io/rewrite-target": "/foo", + } + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(And(ContainSubstring(host), Not(ContainSubstring("proxy_set_header X-Forwarded-Prefix")))) + }) + + uri := "/" + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+uri). + Set("Host", host). 
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
+		Expect(body).To(Not(ContainSubstring("x-forwarded-prefix")))
+	})
+})
diff --git a/test/e2e/dbg/main.go b/test/e2e/dbg/main.go
new file mode 100644
index 0000000000..55dfe3a77a
--- /dev/null
+++ b/test/e2e/dbg/main.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbg
+
+import (
+	"encoding/json"
+	"strings"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/ingress-nginx/test/e2e/framework"
+)
+
+var _ = framework.IngressNginxDescribe("Debug Tool", func() {
+	f := framework.NewDefaultFramework("debug-tool")
+	host := "foo.com"
+
+	BeforeEach(func() {
+		f.NewEchoDeploymentWithReplicas(1)
+	})
+
+	AfterEach(func() {
+	})
+
+	It("should list the backend servers", func() {
+		annotations := map[string]string{}
+
+		ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)
+		f.EnsureIngress(ing)
+
+		f.WaitForNginxConfiguration(func(cfg string) bool {
+			return Expect(cfg).Should(ContainSubstring(host))
+		})
+
+		cmd := "/dbg backends list"
+		output, err := f.ExecIngressPod(cmd)
+		Expect(err).Should(BeNil())
+
+		// Should be 2: the default and the echo deployment
+		numUpstreams := len(strings.Split(strings.Trim(string(output), "\n"), "\n"))
+		Expect(numUpstreams).Should(Equal(2))
+	})
+
+	It("should get information for a specific backend server", func() {
+		annotations := map[string]string{}
+
+		ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)
+		f.EnsureIngress(ing)
+
+		f.WaitForNginxConfiguration(func(cfg string) bool {
+			return Expect(cfg).Should(ContainSubstring(host))
+		})
+
+		cmd := "/dbg backends list"
+		output, err := f.ExecIngressPod(cmd)
+		Expect(err).Should(BeNil())
+
+		backends := strings.Split(string(output), "\n")
+		Expect(len(backends)).Should(BeNumerically(">", 0))
+
+		getCmd := "/dbg backends get " + backends[0]
+		output, err = f.ExecIngressPod(getCmd)
+		Expect(err).Should(BeNil())
+
+		var backend map[string]interface{}
+		unmarshalErr := json.Unmarshal([]byte(output), &backend)
+		Expect(unmarshalErr).Should(BeNil())
+
+		// Check that the backend we've gotten has the same name as the one we requested
+		Expect(backends[0]).Should(Equal(backend["name"].(string)))
+	})
+
+	It("should produce valid JSON for /dbg general", func() {
+		annotations := map[string]string{}
+
+		ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)
+		f.EnsureIngress(ing)
+
+		cmd := "/dbg general"
+		output, err := f.ExecIngressPod(cmd)
+		Expect(err).Should(BeNil())
+
+		var general interface{}
+		unmarshalErr := json.Unmarshal([]byte(output), &general)
+		Expect(unmarshalErr).Should(BeNil())
+	})
+})
diff --git a/test/e2e/defaultbackend/custom_default_backend.go b/test/e2e/defaultbackend/custom_default_backend.go
new file mode 100644
index 0000000000..a6e4ab0bcf
--- /dev/null
+++ b/test/e2e/defaultbackend/custom_default_backend.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultbackend
+
+import (
+	"net/http"
+	"strings"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"github.com/parnurzeal/gorequest"
+
+	appsv1 "k8s.io/api/apps/v1"
+
+	"k8s.io/ingress-nginx/test/e2e/framework"
+)
+
+var _ = framework.IngressNginxDescribe("Custom Default Backend", func() {
+	f := framework.NewDefaultFramework("custom-default-backend")
+
+	BeforeEach(func() {
+		f.NewEchoDeploymentWithReplicas(1)
+
+		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
+			func(deployment *appsv1.Deployment) error {
+				args := deployment.Spec.Template.Spec.Containers[0].Args
+				args = append(args, "--default-backend-service=$(POD_NAMESPACE)/http-svc")
+				deployment.Spec.Template.Spec.Containers[0].Args = args
+				_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment)

+				return err
+			})
+		Expect(err).NotTo(HaveOccurred(), "unexpected error updating ingress controller deployment")
+
+		f.WaitForNginxServer("_",
+			func(server string) bool {
+				return strings.Contains(server, "set $proxy_upstream_name \"upstream-default-backend\"")
+			})
+	})
+
+	It("uses custom default backend", func() {
+		resp, _, errs := gorequest.New().Get(f.GetURL(framework.HTTP)).End()
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
+	})
+})
diff --git a/test/e2e/defaultbackend/default_backend.go b/test/e2e/defaultbackend/default_backend.go
index 4e552630da..04dec9f399 100644
--- a/test/e2e/defaultbackend/default_backend.go
+++ b/test/e2e/defaultbackend/default_backend.go
@@ -76,9 +76,9 @@ var _ = framework.IngressNginxDescribe("Default backend", func() {
 
 			switch test.Scheme {
 			case framework.HTTP:
-				cm = request.CustomMethod(test.Method, f.NginxHTTPURL)
+				cm = request.CustomMethod(test.Method, f.GetURL(framework.HTTP))
 			case framework.HTTPS:
-				cm = request.CustomMethod(test.Method, f.NginxHTTPSURL)
+				cm = request.CustomMethod(test.Method, f.GetURL(framework.HTTPS))
 				// the default backend uses a self generated certificate
 				cm.Transport = &http.Transport{
 					TLSClientConfig: &tls.Config{
@@ -94,8 +94,39 @@ var _ = framework.IngressNginxDescribe("Default backend", func() {
 			}
 
 			resp, _, errs := cm.End()
-			Expect(len(errs)).Should(BeNumerically("==", 0))
+			Expect(errs).Should(BeEmpty())
 			Expect(resp.StatusCode).Should(Equal(test.Status))
 		}
 	})
+
+	It("enables access logging for default backend", func() {
+		f.UpdateNginxConfigMapData("enable-access-log-for-default-backend", "true")
+		host := "foo"
+		resp, _, errs := gorequest.New().
+			Get(f.GetURL(framework.HTTP)+"/somethingOne").
+			Set("Host", host).
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusNotFound))
+
+		logs, err := f.NginxLogs()
+		Expect(err).ToNot(HaveOccurred())
+		Expect(logs).To(ContainSubstring("/somethingOne"))
+	})
+
+	It("disables access logging for default backend", func() {
+		f.UpdateNginxConfigMapData("enable-access-log-for-default-backend", "false")
+		host := "bar"
+		resp, _, errs := gorequest.New().
+			Get(f.GetURL(framework.HTTP)+"/somethingTwo").
+			Set("Host", host).
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusNotFound))
+
+		logs, err := f.NginxLogs()
+		Expect(err).ToNot(HaveOccurred())
+		Expect(logs).ToNot(ContainSubstring("/somethingTwo"))
+	})
 })
diff --git a/test/e2e/defaultbackend/ssl.go b/test/e2e/defaultbackend/ssl.go
index 49511d4912..685aa3a3eb 100644
--- a/test/e2e/defaultbackend/ssl.go
+++ b/test/e2e/defaultbackend/ssl.go
@@ -38,13 +38,13 @@ var _ = framework.IngressNginxDescribe("Default backend - SSL", func() {
 	It("should return a self generated SSL certificate", func() {
 		By("checking SSL Certificate using the NGINX IP address")
 		resp, _, errs := gorequest.New().
-			Post(f.NginxHTTPSURL).
+			Post(f.GetURL(framework.HTTPS)).
 			TLSClientConfig(&tls.Config{
 				// the default backend uses a self generated certificate
 				InsecureSkipVerify: true,
 			}).End()
 
-		Expect(len(errs)).Should(BeNumerically("==", 0))
+		Expect(errs).Should(BeEmpty())
 		Expect(len(resp.TLS.PeerCertificates)).Should(BeNumerically("==", 1))
 
 		for _, pc := range resp.TLS.PeerCertificates {
@@ -53,13 +53,13 @@ var _ = framework.IngressNginxDescribe("Default backend - SSL", func() {
 
 		By("checking SSL Certificate using the NGINX catch all server")
 		resp, _, errs = gorequest.New().
-			Post(f.NginxHTTPSURL).
+			Post(f.GetURL(framework.HTTPS)).
 			TLSClientConfig(&tls.Config{
 				// the default backend uses a self generated certificate
 				InsecureSkipVerify: true,
 			}).
 			Set("Host", "foo.bar.com").End()
-		Expect(len(errs)).Should(BeNumerically("==", 0))
+		Expect(errs).Should(BeEmpty())
 		Expect(len(resp.TLS.PeerCertificates)).Should(BeNumerically("==", 1))
 		for _, pc := range resp.TLS.PeerCertificates {
 			Expect(pc.Issuer.CommonName).Should(Equal("Kubernetes Ingress Controller Fake Certificate"))
diff --git a/test/e2e/defaultbackend/with_hosts.go b/test/e2e/defaultbackend/with_hosts.go
new file mode 100644
index 0000000000..ce74374a11
--- /dev/null
+++ b/test/e2e/defaultbackend/with_hosts.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultbackend
+
+import (
+	. "github.com/onsi/ginkgo"
+	.
"github.com/onsi/gomega" + + "net/http" + "strings" + + "github.com/parnurzeal/gorequest" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Default backend with hosts", func() { + f := framework.NewDefaultFramework("default-backend-hosts") + host := "foo.com" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("should apply the annotation to the default backend", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-buffer-size": "8k", + } + + ing := &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-backend-annotations", + Namespace: f.Namespace, + Annotations: annotations, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(8080), + }, + Rules: []extensions.IngressRule{ + { + Host: host, + }, + }, + }, + } + + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_buffer_size 8k;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", "foo.com"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + +}) diff --git a/test/e2e/down.sh b/test/e2e/down.sh new file mode 100755 index 0000000000..4948d905ef --- /dev/null +++ b/test/e2e/down.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. + +export PATH="$HOME/.kubeadm-dind-cluster:$SCRIPT_ROOT/build:$PATH" + +dind-cluster-v1.11.sh down +dind-cluster-v1.11.sh clean + diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index e448df03c2..54caf7568e 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -17,21 +17,31 @@ limitations under the License. 
package e2e import ( + "os" "testing" - "github.com/golang/glog" "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/gomega" - "k8s.io/apiserver/pkg/util/logs" + "k8s.io/component-base/logs" + + // required _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/ingress-nginx/test/e2e/framework" + // tests to run _ "k8s.io/ingress-nginx/test/e2e/annotations" + _ "k8s.io/ingress-nginx/test/e2e/dbg" _ "k8s.io/ingress-nginx/test/e2e/defaultbackend" + _ "k8s.io/ingress-nginx/test/e2e/gracefulshutdown" + _ "k8s.io/ingress-nginx/test/e2e/loadbalance" + _ "k8s.io/ingress-nginx/test/e2e/lua" + _ "k8s.io/ingress-nginx/test/e2e/servicebackend" _ "k8s.io/ingress-nginx/test/e2e/settings" _ "k8s.io/ingress-nginx/test/e2e/ssl" + _ "k8s.io/ingress-nginx/test/e2e/status" + _ "k8s.io/ingress-nginx/test/e2e/tcpudp" ) // RunE2ETests checks configuration parameters (specified through flags) and then runs @@ -46,6 +56,17 @@ func RunE2ETests(t *testing.T) { config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]` } - glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) + if os.Getenv("KUBECTL_PATH") != "" { + framework.KubectlPath = os.Getenv("KUBECTL_PATH") + framework.Logf("Using kubectl path '%s'", framework.KubectlPath) + } + + framework.Logf("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite") } + +var _ = ginkgo.SynchronizedAfterSuite(func() { + // Run on all Ginkgo nodes + framework.Logf("Running AfterSuite actions on all nodes") + framework.RunCleanupActions() +}, func() {}) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 8de277bc5b..f7546229b9 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -19,18 +19,15 @@ package e2e import ( "testing" - "github.com/golang/glog" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/ingress-nginx/test/e2e/framework" ) func init() { framework.RegisterParseFlags() - if "" == framework.TestContext.KubeConfig { - glog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) - } + // if "" == framework.TestContext.KubeConfig { + // klog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) + // } } func TestE2E(t *testing.T) { RunE2ETests(t) diff --git a/test/e2e/framework/cleanup.go b/test/e2e/framework/cleanup.go index c7f256d06a..c8da7d1fbd 100644 --- a/test/e2e/framework/cleanup.go +++ b/test/e2e/framework/cleanup.go @@ -18,13 +18,14 @@ package framework import "sync" +// CleanupActionHandle is a handle used to perform a cleanup action. type CleanupActionHandle *int var cleanupActionsLock sync.Mutex var cleanupActions = map[CleanupActionHandle]func(){} // AddCleanupAction installs a function that will be called in the event of the -// whole test being terminated. This allows arbitrary pieces of the overall +// whole test being terminated. This allows arbitrary pieces of the overall // test to hook into SynchronizedAfterSuite(). func AddCleanupAction(fn func()) CleanupActionHandle { p := CleanupActionHandle(new(int)) @@ -41,7 +42,7 @@ func RemoveCleanupAction(p CleanupActionHandle) { delete(cleanupActions, p) } -// RunCleanupActions runs all functions installed by AddCleanupAction. It does +// RunCleanupActions runs all functions installed by AddCleanupAction. It does // not remove them (see RemoveCleanupAction) but it does run unlocked, so they // may remove themselves. 
func RunCleanupActions() { diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go new file mode 100644 index 0000000000..4c9268fb16 --- /dev/null +++ b/test/e2e/framework/deployment.go @@ -0,0 +1,140 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + . "github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// NewEchoDeployment creates a new single replica deployment of the echoserver image in a particular namespace +func (f *Framework) NewEchoDeployment() { + f.NewEchoDeploymentWithReplicas(1) +} + +// NewEchoDeploymentWithReplicas creates a new deployment of the echoserver image in a particular namespace. Number of +// replicas is configurable +func (f *Framework) NewEchoDeploymentWithReplicas(replicas int32) { + f.NewEchoDeploymentWithNameAndReplicas("http-svc", replicas) +} + +// NewEchoDeploymentWithNameAndReplicas creates a new deployment of the echoserver image in a particular namespace. Number of +// replicas is configurable and +// name is configurable +func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas int32) { + f.NewDeployment(name, "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, replicas) +} + +// NewSlowEchoDeployment creates a new deployment of the slow echo server image in a particular namespace. +func (f *Framework) NewSlowEchoDeployment() { + f.NewDeployment("slowecho", "breunigs/slowechoserver", 8080, 1) +} + +// NewHttpbinDeployment creates a new single replica deployment of the httpbin image in a particular namespace. +func (f *Framework) NewHttpbinDeployment() { + f.NewDeployment("httpbin", "kennethreitz/httpbin", 80, 1) +} + +// NewDeployment creates a new deployment in a particular namespace. 
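+// It also creates a matching Service and blocks until the expected number of
+// endpoints is ready. A caller could, for instance, provision a custom image
+// with (hypothetical values, not used elsewhere in this patch):
+//
+//	f.NewDeployment("my-backend", "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, 3)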
+func (f *Framework) NewDeployment(name, image string, port int32, replicas int32) { + probe := &corev1.Probe{ + InitialDelaySeconds: 5, + PeriodSeconds: 10, + SuccessThreshold: 1, + TimeoutSeconds: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromString("http"), + Path: "/", + }, + }, + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: f.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: NewInt32(replicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: NewInt64(0), + Containers: []corev1.Container{ + { + Name: name, + Image: image, + Env: []corev1.EnvVar{}, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: port, + }, + }, + ReadinessProbe: probe, + LivenessProbe: probe, + }, + }, + }, + }, + }, + } + + d, err := f.EnsureDeployment(deployment) + Expect(err).NotTo(HaveOccurred(), "failed to create a deployment") + Expect(d).NotTo(BeNil(), "expected a deployment but none returned") + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: f.Namespace, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.FromInt(int(port)), + Protocol: corev1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app": name, + }, + }, + } + + s := f.EnsureService(service) + Expect(s).NotTo(BeNil(), "expected a service but none returned") + + err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, int(replicas)) + Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready") +} diff --git a/test/e2e/framework/echo.go b/test/e2e/framework/echo.go deleted file mode 100644 index 98e67b1875..0000000000 --- a/test/e2e/framework/echo.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "fmt" - "time" - - "github.com/pkg/errors" - - corev1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// NewEchoDeployment creates a new single replica deployment of the echoserver image in a particular namespace -func (f *Framework) NewEchoDeployment() error { - return f.NewEchoDeploymentWithReplicas(1) -} - -// NewEchoDeploymentWithReplicas creates a new deployment of the echoserver image in a particular namespace. 
Number of -// replicas is configurable -func (f *Framework) NewEchoDeploymentWithReplicas(replicas int32) error { - deployment := &extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "http-svc", - Namespace: f.Namespace.Name, - }, - Spec: extensions.DeploymentSpec{ - Replicas: NewInt32(replicas), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "http-svc", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": "http-svc", - }, - }, - Spec: corev1.PodSpec{ - TerminationGracePeriodSeconds: NewInt64(1), - Containers: []corev1.Container{ - { - Name: "http-svc", - Image: "gcr.io/google_containers/echoserver:1.8", - Env: []corev1.EnvVar{}, - Ports: []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: 8080, - }, - }, - }, - }, - }, - }, - }, - } - - d, err := f.EnsureDeployment(deployment) - if err != nil { - return err - } - - if d == nil { - return fmt.Errorf("unexpected error creating deployement for echoserver") - } - - err = f.WaitForPodsReady(10*time.Second, int(replicas), metav1.ListOptions{ - LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(), - }) - if err != nil { - return errors.Wrap(err, "failed to wait for to become ready") - } - - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "http-svc", - Namespace: f.Namespace.Name, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - }, - Selector: map[string]string{ - "app": "http-svc", - }, - }, - } - - s, err := f.EnsureService(service) - if err != nil { - return err - } - - if s == nil { - return fmt.Errorf("unexpected error creating service for echoserver deployment") - } - - return nil -} diff --git a/test/e2e/framework/exec.go b/test/e2e/framework/exec.go index aecc0170b8..1da7f586bc 100644 --- a/test/e2e/framework/exec.go +++ b/test/e2e/framework/exec.go @@ -19,11 +19,25 @@ package framework import ( "bytes" "fmt" + "io" "os/exec" + "regexp" + "strconv" + "strings" "k8s.io/api/core/v1" ) +// ExecIngressPod executes a command inside the first container in ingress controller running pod +func (f *Framework) ExecIngressPod(command string) (string, error) { + pod, err := getIngressNGINXPod(f.Namespace, f.KubeClientSet) + if err != nil { + return "", err + } + + return f.ExecCommand(pod, command) +} + // ExecCommand executes a command inside a the first container in a running pod func (f *Framework) ExecCommand(pod *v1.Pod, command string) (string, error) { var ( @@ -31,18 +45,14 @@ func (f *Framework) ExecCommand(pod *v1.Pod, command string) (string, error) { execErr bytes.Buffer ) - if len(pod.Spec.Containers) != 1 { - return "", fmt.Errorf("could not determine which container to use") - } - - args := fmt.Sprintf("kubectl exec -n %v %v -- %v", pod.Namespace, pod.Name, command) - cmd := exec.Command("/bin/bash", "-c", args) + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v exec --namespace %s %s --container nginx-ingress-controller -- %s", KubectlPath, pod.Namespace, pod.Name, command)) cmd.Stdout = &execOut cmd.Stderr = &execErr err := cmd.Run() if err != nil { - return "", fmt.Errorf("could not execute: %v", err) + return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err) + } if execErr.Len() > 0 { @@ -51,3 +61,63 @@ func (f *Framework) ExecCommand(pod *v1.Pod, command string) (string, error) { return execOut.String(), nil } + +// 
NewIngressController deploys a new NGINX Ingress controller in a namespace +func (f *Framework) NewIngressController(namespace string) error { + // Creates an nginx deployment + cmd := exec.Command("./wait-for-nginx.sh", namespace) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Unexpected error waiting for ingress controller deployment: %v.\nLogs:\n%v", err, string(out)) + } + + return nil +} + +var ( + proxyRegexp = regexp.MustCompile("Starting to serve on .*:([0-9]+)") +) + +// KubectlProxy creates a proxy to kubernetes apiserver +func (f *Framework) KubectlProxy(port int) (int, *exec.Cmd, error) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s proxy --accept-hosts=.* --address=0.0.0.0 --port=%d", KubectlPath, port)) + stdout, stderr, err := startCmdAndStreamOutput(cmd) + if err != nil { + return -1, nil, err + } + + defer stdout.Close() + defer stderr.Close() + + buf := make([]byte, 128) + var n int + if n, err = stdout.Read(buf); err != nil { + return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err) + } + + output := string(buf[:n]) + match := proxyRegexp.FindStringSubmatch(output) + if len(match) == 2 { + if port, err := strconv.Atoi(match[1]); err == nil { + return port, cmd, nil + } + } + + return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output) +} + +func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { + stdout, err = cmd.StdoutPipe() + if err != nil { + return + } + + stderr, err = cmd.StderrPipe() + if err != nil { + return + } + + Logf("Asynchronously running '%s'", strings.Join(cmd.Args, " ")) + err = cmd.Start() + return +} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 20994decc6..cc784497da 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -15,31 +15,28 @@ package framework import ( "fmt" - "os/exec" "strings" "time" - "k8s.io/api/core/v1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" apiextcs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/klog" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -const ( - podName = "test-ingress-controller" -) - -const ( - MaxRetry = 200 - NoRetry = 1 -) - +// RequestScheme define a scheme used in a test request. type RequestScheme string // These are valid test request schemes. @@ -48,6 +45,11 @@ const ( HTTPS RequestScheme = "https" ) +var ( + // KubectlPath defines the full path of the kubectl binary + KubectlPath = "/usr/local/bin/kubectl" +) + // Framework supports common operations used by e2e tests; it will keep a client & a namespace for you. type Framework struct { BaseName string @@ -57,16 +59,12 @@ type Framework struct { KubeConfig *restclient.Config APIExtensionsClientSet apiextcs.Interface - // Namespace in which all test resources should reside - Namespace *v1.Namespace - // To make sure that this framework cleans up after itself, no matter what, - // we install a Cleanup action before each test and clear it after. If we + // we install a Cleanup action before each test and clear it after. 
If we
 	// should abort, the AfterSuite hook should run all Cleanup actions.
 	cleanupHandle CleanupActionHandle
 
-	NginxHTTPURL  string
-	NginxHTTPSURL string
+	Namespace string
 }
 
 // NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
@@ -87,7 +85,11 @@ func (f *Framework) BeforeEach() {
 	f.cleanupHandle = AddCleanupAction(f.AfterEach)
 
 	By("Creating a kubernetes client")
-	kubeConfig, err := LoadConfig(TestContext.KubeConfig, TestContext.KubeContext)
+	kubeConfig, err := restclient.InClusterConfig()
+	if err != nil {
+		panic(err.Error())
+	}
+
 	Expect(err).NotTo(HaveOccurred())
 
 	f.KubeConfig = kubeConfig
@@ -95,140 +97,391 @@ func (f *Framework) BeforeEach() {
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Building a namespace api object")
-	f.Namespace, err = CreateKubeNamespace(f.BaseName, f.KubeClientSet)
+	ingressNamespace, err := CreateKubeNamespace(f.BaseName, f.KubeClientSet)
 	Expect(err).NotTo(HaveOccurred())
 
-	By("Building NGINX HTTP URL")
-	f.NginxHTTPURL, err = f.GetNginxURL(HTTP)
+	f.Namespace = ingressNamespace
+
+	By("Starting new ingress controller")
+	err = f.NewIngressController(f.Namespace)
 	Expect(err).NotTo(HaveOccurred())
 
-	By("Building NGINX HTTPS URL")
-	f.NginxHTTPSURL, err = f.GetNginxURL(HTTPS)
+	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
 	Expect(err).NotTo(HaveOccurred())
+
+	// we wait for any change in the informers and SSL certificate generation
+	time.Sleep(5 * time.Second)
 }
 
 // AfterEach deletes the namespace, after reading its events.
 func (f *Framework) AfterEach() {
 	RemoveCleanupAction(f.cleanupHandle)
 
-	By("Deleting test namespace")
-	err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace.Name)
-	Expect(err).NotTo(HaveOccurred())
 
-	By("Waiting for test namespace to no longer exist")
-	err = WaitForNoPodsInNamespace(f.KubeClientSet, f.Namespace.Name)
+	err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace)
 	Expect(err).NotTo(HaveOccurred())
+
+	if CurrentGinkgoTestDescription().Failed {
+		log, err := f.NginxLogs()
+		Expect(err).ToNot(HaveOccurred())
+		By("Dumping NGINX logs after a failure running a test")
+		Logf("%v", log)
+	}
 }
 
-// IngressNginxDescribe wrapper function for ginkgo describe.  Adds namespacing.
+// IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
 func IngressNginxDescribe(text string, body func()) bool {
 	return Describe("[nginx-ingress] "+text, body)
 }
 
-// GetNginxIP returns the IP address of the minikube cluster
-// where the NGINX ingress controller is running
-func (f *Framework) GetNginxIP() (string, error) {
-	out, err := exec.Command("minikube", "ip").Output()
+// GetNginxIP returns the ClusterIP address of the ingress-nginx service
+func (f *Framework) GetNginxIP() string {
+	s, err := f.KubeClientSet.
+		CoreV1().
+		Services(f.Namespace).
+ Get("ingress-nginx", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "unexpected error obtaning NGINX IP address") + return s.Spec.ClusterIP +} + +// GetURL returns the URL should be used to make a request to NGINX +func (f *Framework) GetURL(scheme RequestScheme) string { + ip := f.GetNginxIP() + return fmt.Sprintf("%v://%v", scheme, ip) +} + +// WaitForNginxServer waits until the nginx configuration contains a particular server section +func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) { + err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher)) + Expect(err).NotTo(HaveOccurred(), "unexpected error waiting for nginx server condition/s") + time.Sleep(5 * time.Second) +} + +// WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration +func (f *Framework) WaitForNginxConfiguration(matcher func(cfg string) bool) { + err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions("", matcher)) + Expect(err).NotTo(HaveOccurred(), "unexpected error waiting for nginx server condition/s") + time.Sleep(5 * time.Second) +} + +func nginxLogs(client kubernetes.Interface, namespace string) (string, error) { + pod, err := getIngressNGINXPod(namespace, client) if err != nil { return "", err } - return strings.TrimSpace(string(out)), nil -} -// GetNginxPort returns the number of TCP port where NGINX is running -func (f *Framework) GetNginxPort(name string) (int, error) { - s, err := f.KubeClientSet.CoreV1().Services("ingress-nginx").Get("ingress-nginx", metav1.GetOptions{}) - if err != nil { - return -1, err + if isRunning, err := podRunningReady(pod); err == nil && isRunning { + return Logs(pod) } - for _, p := range s.Spec.Ports { - if p.NodePort != 0 && p.Name == name { - return int(p.NodePort), nil + return "", fmt.Errorf("no nginx ingress controller pod is running (logs)") +} + +// NginxLogs returns the logs of the nginx ingress controller pod running +func (f *Framework) NginxLogs() (string, error) { + return nginxLogs(f.KubeClientSet, f.Namespace) +} + +func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) bool) wait.ConditionFunc { + return func() (bool, error) { + pod, err := getIngressNGINXPod(f.Namespace, f.KubeClientSet) + if err != nil { + return false, nil + } + + var cmd string + if name == "" { + cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf") + } else { + cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf | awk '/## start server %v/,/## end server %v/'", name, name) } - } - return -1, err + o, err := f.ExecCommand(pod, cmd) + if err != nil { + return false, nil + } + + var match bool + errs := InterceptGomegaFailures(func() { + if klog.V(10) && len(o) > 0 { + klog.Infof("nginx.conf:\n%v", o) + } + + // passes the nginx config to the passed function + if matcher(strings.Join(strings.Fields(o), " ")) { + match = true + } + }) + + if match { + return true, nil + } + + if len(errs) > 0 { + klog.V(2).Infof("Errors waiting for conditions: %v", errs) + } + + return false, nil + } } -// GetNginxURL returns the URL should be used to make a request to NGINX -func (f *Framework) GetNginxURL(scheme RequestScheme) (string, error) { - ip, err := f.GetNginxIP() +func (f *Framework) getNginxConfigMap() (*v1.ConfigMap, error) { + if f.KubeClientSet == nil { + return nil, fmt.Errorf("KubeClientSet not initialized") + } + + config, err := f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). 
+ Get("nginx-configuration", metav1.GetOptions{}) if err != nil { - return "", err + return nil, err } - port, err := f.GetNginxPort(fmt.Sprintf("%v", scheme)) + return config, err +} + +// GetNginxConfigMapData gets ingress-nginx's nginx-configuration map's data +func (f *Framework) GetNginxConfigMapData() (map[string]string, error) { + config, err := f.getNginxConfigMap() if err != nil { - return "", err + return nil, err + } + if config.Data == nil { + config.Data = map[string]string{} } - return fmt.Sprintf("%v://%v:%v", scheme, ip, port), nil + return config.Data, err } -// WaitForNginxServer waits until the nginx configuration contains a particular server section -func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) error { - // initial wait to allow the update of the ingress controller +// SetNginxConfigMapData sets ingress-nginx's nginx-configuration configMap data +func (f *Framework) SetNginxConfigMapData(cmData map[string]string) { + // Needs to do a Get and Set, Update will not take just the Data field + // or a configMap that is not the very last revision + config, err := f.getNginxConfigMap() + Expect(err).NotTo(HaveOccurred()) + Expect(config).NotTo(BeNil(), "expected a configmap but none returned") + + config.Data = cmData + + _, err = f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Update(config) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(5 * time.Second) - return wait.PollImmediate(Poll, time.Minute*2, f.matchNginxConditions(name, matcher)) } -// NginxLogs returns the logs of the nginx ingress controller pod running -func (f *Framework) NginxLogs() (string, error) { - l, err := f.KubeClientSet.CoreV1().Pods("ingress-nginx").List(metav1.ListOptions{ - LabelSelector: "app=ingress-nginx", +// UpdateNginxConfigMapData updates single field in ingress-nginx's nginx-configuration map data +func (f *Framework) UpdateNginxConfigMapData(key string, value string) { + config, err := f.GetNginxConfigMapData() + Expect(err).NotTo(HaveOccurred(), "unexpected error reading configmap") + + config[key] = value + + f.SetNginxConfigMapData(config) +} + +// DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up. +// Grace period to wait for pod shutdown is in seconds. 
+func (f *Framework) DeleteNGINXPod(grace int64) { + ns := f.Namespace + pod, err := getIngressNGINXPod(ns, f.KubeClientSet) + Expect(err).NotTo(HaveOccurred(), "expected ingress nginx pod to be running") + + err = f.KubeClientSet.CoreV1().Pods(ns).Delete(pod.GetName(), metav1.NewDeleteOptions(grace)) + Expect(err).NotTo(HaveOccurred(), "unexpected error deleting ingress nginx pod") + + err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) { + pod, err := getIngressNGINXPod(ns, f.KubeClientSet) + if err != nil || pod == nil { + return false, nil + } + return pod.GetName() != "", nil }) + Expect(err).NotTo(HaveOccurred(), "unexpected error while waiting for ingress nginx pod to come up again") +} + +// UpdateDeployment runs the given updateFunc on the deployment and waits for it to be updated +func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name string, replicas int, updateFunc func(d *appsv1.Deployment) error) error { + deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", err + return err } - if len(l.Items) == 0 { - return "", fmt.Errorf("no nginx ingress controller pod is running") + if updateFunc != nil { + if err := updateFunc(deployment); err != nil { + return err + } + } + + if *deployment.Spec.Replicas != int32(replicas) { + klog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas) + deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + + deployment.Spec.Replicas = NewInt32(int32(replicas)) + _, err = kubeClientSet.AppsV1().Deployments(namespace).Update(deployment) + if err != nil { + return errors.Wrapf(err, "scaling the number of replicas to %v", replicas) + } } - if len(l.Items) != 1 { - return "", fmt.Errorf("unexpected number of nginx ingress controller pod is running (%v)", len(l.Items)) + err = WaitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{ + LabelSelector: fields.SelectorFromSet(fields.Set(deployment.Spec.Template.ObjectMeta.Labels)).String(), + }) + if err != nil { + return errors.Wrapf(err, "waiting for nginx-ingress-controller replica count to be %v", replicas) } - return f.Logs(&l.Items[0]) + return nil } -func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) bool) wait.ConditionFunc { - return func() (bool, error) { - l, err := f.KubeClientSet.CoreV1().Pods("ingress-nginx").List(metav1.ListOptions{ - LabelSelector: "app=ingress-nginx", - }) - if err != nil { - return false, err - } +// UpdateIngress runs the given updateFunc on the ingress +func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name string, updateFunc func(d *extensions.Ingress) error) error { + ingress, err := kubeClientSet.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return err + } - if len(l.Items) == 0 { - return false, fmt.Errorf("no nginx ingress controller pod is running") - } + if err := updateFunc(ingress); err != nil { + return err + } - if len(l.Items) != 1 { - return false, fmt.Errorf("unexpected number of nginx ingress controller pod is running (%v)", len(l.Items)) - } + _, err = kubeClientSet.ExtensionsV1beta1().Ingresses(namespace).Update(ingress) + return err +} - cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf | awk '/## start server %v/,/## end server %v/'", name, name) - o, err := f.ExecCommand(&l.Items[0], cmd) - if err != nil { - return 
false, err - } +// NewSingleIngressWithTLS creates a simple ingress rule with TLS spec included +func NewSingleIngressWithTLS(name, path, host string, tlsHosts []string, ns, service string, port int, annotations *map[string]string) *extensions.Ingress { + return newSingleIngressWithRules(name, path, host, ns, service, port, annotations, tlsHosts) +} - var match bool - errs := InterceptGomegaFailures(func() { - if matcher(o) { - match = true - } +// NewSingleIngress creates a simple ingress rule +func NewSingleIngress(name, path, host, ns, service string, port int, annotations *map[string]string) *extensions.Ingress { + return newSingleIngressWithRules(name, path, host, ns, service, port, annotations, nil) +} + +// NewSingleIngressWithMultiplePaths creates a simple ingress rule with multiple paths +func NewSingleIngressWithMultiplePaths(name string, paths []string, host, ns, service string, port int, annotations *map[string]string) *extensions.Ingress { + spec := extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{}, + }, + }, + }, + } + + for _, path := range paths { + spec.Rules[0].IngressRuleValue.HTTP.Paths = append(spec.Rules[0].IngressRuleValue.HTTP.Paths, extensions.HTTPIngressPath{ + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: service, + ServicePort: intstr.FromInt(port), + }, }) + } - glog.V(2).Infof("Errors waiting for conditions: %v", errs) + return newSingleIngress(name, ns, annotations, spec) +} - if match { - return true, nil +func newSingleIngressWithRules(name, path, host, ns, service string, port int, annotations *map[string]string, tlsHosts []string) *extensions.Ingress { + + spec := extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: service, + ServicePort: intstr.FromInt(port), + }, + }, + }, + }, + }, + }, + }, + } + + if len(tlsHosts) > 0 { + spec.TLS = []extensions.IngressTLS{ + { + Hosts: tlsHosts, + SecretName: host, + }, } + } - return false, nil + return newSingleIngress(name, ns, annotations, spec) +} + +// NewSingleIngressWithBackendAndRules creates an ingress with both a default backend and a rule +func NewSingleIngressWithBackendAndRules(name, path, host, ns, defaultService string, defaultPort int, service string, port int, annotations *map[string]string) *extensions.Ingress { + spec := extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: defaultService, + ServicePort: intstr.FromInt(defaultPort), + }, + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: service, + ServicePort: intstr.FromInt(port), + }, + }, + }, + }, + }, + }, + }, } + + return newSingleIngress(name, ns, annotations, spec) +} + +// NewSingleCatchAllIngress creates a simple ingress with a catch-all backend +func NewSingleCatchAllIngress(name, ns, service string, port int, annotations *map[string]string) *extensions.Ingress { + spec := extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: service, + ServicePort: intstr.FromInt(port), + }, + } + return newSingleIngress(name, ns, annotations, 
spec) +} + +func newSingleIngress(name, ns string, annotations *map[string]string, spec extensions.IngressSpec) *extensions.Ingress { + if annotations == nil { + annotations = &map[string]string{} + } + + ing := &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Annotations: *annotations, + }, + Spec: spec, + } + + return ing } diff --git a/test/e2e/framework/grpc_fortune_teller.go b/test/e2e/framework/grpc_fortune_teller.go new file mode 100644 index 0000000000..7cab3e026e --- /dev/null +++ b/test/e2e/framework/grpc_fortune_teller.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + . "github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// NewGRPCFortuneTellerDeployment creates a new single replica +// deployment of the fortune teller image in a particular namespace +func (f *Framework) NewGRPCFortuneTellerDeployment() { + f.NewNewGRPCFortuneTellerDeploymentWithReplicas(1) +} + +// NewNewGRPCFortuneTellerDeploymentWithReplicas creates a new deployment of the +// fortune teller image in a particular namespace. 
Number of replicas is configurable
+func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32) {
+	deployment := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "fortune-teller",
+			Namespace: f.Namespace,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: NewInt32(replicas),
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": "fortune-teller",
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "fortune-teller",
+					},
+				},
+				Spec: corev1.PodSpec{
+					TerminationGracePeriodSeconds: NewInt64(0),
+					Containers: []corev1.Container{
+						{
+							Name:  "fortune-teller",
+							Image: "quay.io/kubernetes-ingress-controller/grpc-fortune-teller:0.1",
+							Env:   []corev1.EnvVar{},
+							Ports: []corev1.ContainerPort{
+								{
+									Name:          "grpc",
+									ContainerPort: 50051,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	d, err := f.EnsureDeployment(deployment)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(d).NotTo(BeNil(), "expected a fortune-teller deployment")
+
+	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
+		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
+	})
+	Expect(err).NotTo(HaveOccurred(), "failed to wait for pods to become ready")
+
+	service := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "fortune-teller",
+			Namespace: f.Namespace,
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{
+				{
+					Name:       "grpc",
+					Port:       50051,
+					TargetPort: intstr.FromInt(50051),
+					Protocol:   "TCP",
+				},
+			},
+			Selector: map[string]string{
+				"app": "fortune-teller",
+			},
+		},
+	}
+
+	f.EnsureService(service)
+}
diff --git a/test/e2e/framework/influxdb.go b/test/e2e/framework/influxdb.go
new file mode 100644
index 0000000000..c2ca8d7f00
--- /dev/null
+++ b/test/e2e/framework/influxdb.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	.
"github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" +) + +const influxConfig = ` +reporting-disabled = true +bind-address = "0.0.0.0:8088" + +[meta] + dir = "/var/lib/influxdb/meta" + retention-autocreate = true + logging-enabled = true + +[data] + dir = "/var/lib/influxdb/data" + index-version = "inmem" + wal-dir = "/var/lib/influxdb/wal" + wal-fsync-delay = "0s" + query-log-enabled = true + cache-max-memory-size = 1073741824 + cache-snapshot-memory-size = 26214400 + cache-snapshot-write-cold-duration = "10m0s" + compact-full-write-cold-duration = "4h0m0s" + max-series-per-database = 1000000 + max-values-per-tag = 100000 + max-concurrent-compactions = 0 + trace-logging-enabled = false + +[[udp]] + enabled = true + bind-address = ":8089" + database = "nginx" +` + +// NewInfluxDBDeployment creates an InfluxDB server configured to reply +// on 8086/tcp and 8089/udp +func (f *Framework) NewInfluxDBDeployment() { + configuration := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "influxdb-config", + Namespace: f.Namespace, + }, + Data: map[string]string{ + "influxd.conf": influxConfig, + }, + } + + cm, err := f.EnsureConfigMap(configuration) + Expect(err).NotTo(HaveOccurred(), "failed to create an Influxdb deployment") + + Expect(cm).NotTo(BeNil(), "expected a configmap but none returned") + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "influxdb-svc", + Namespace: f.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: NewInt32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "influxdb-svc", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "influxdb-svc", + }, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: NewInt64(0), + Volumes: []corev1.Volume{ + { + Name: "influxdb-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "influxdb-config", + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "influxdb-svc", + Image: "docker.io/influxdb:1.5", + Env: []corev1.EnvVar{}, + Command: []string{"influxd", "-config", "/influxdb-config/influxd.conf"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "influxdb-config", + ReadOnly: true, + MountPath: "/influxdb-config", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8086, + }, + { + Name: "udp", + ContainerPort: 8089, + }, + }, + }, + }, + }, + }, + }, + } + + d, err := f.EnsureDeployment(deployment) + Expect(err).NotTo(HaveOccurred(), "failed to create an Influxdb deployment") + + Expect(d).NotTo(BeNil(), "unexpected error creating deployment for influxdb") + + err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{ + LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(), + }) + Expect(err).NotTo(HaveOccurred(), "failed to wait for influxdb to become ready") +} diff --git a/test/e2e/framework/k8s.go b/test/e2e/framework/k8s.go index 6e77f27964..c30a958b78 100644 --- a/test/e2e/framework/k8s.go +++ b/test/e2e/framework/k8s.go @@ -17,73 +17,122 @@ limitations under the License. package framework import ( + "fmt" + "strings" "time" + . 
"github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" api "k8s.io/api/core/v1" core "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) -func (f *Framework) EnsureSecret(secret *api.Secret) (*api.Secret, error) { +// EnsureSecret creates a Secret object or returns it if it already exists. +func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret { s, err := f.KubeClientSet.CoreV1().Secrets(secret.Namespace).Create(secret) if err != nil { if k8sErrors.IsAlreadyExists(err) { - return f.KubeClientSet.CoreV1().Secrets(secret.Namespace).Update(secret) + s, err := f.KubeClientSet.CoreV1().Secrets(secret.Namespace).Update(secret) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating secret") + + return s + } + + Expect(err).NotTo(HaveOccurred(), "unexpected error creating secret") + } + + Expect(s).NotTo(BeNil()) + Expect(s.ObjectMeta).NotTo(BeNil()) + + return s +} + +// EnsureConfigMap creates a ConfigMap object or returns it if it already exists. +func (f *Framework) EnsureConfigMap(configMap *api.ConfigMap) (*api.ConfigMap, error) { + cm, err := f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Create(configMap) + if err != nil { + if k8sErrors.IsAlreadyExists(err) { + return f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Update(configMap) } return nil, err } - return s, nil + + return cm, nil } -func (f *Framework) EnsureIngress(ingress *extensions.Ingress) (*extensions.Ingress, error) { - s, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress) +// EnsureIngress creates an Ingress object or returns it if it already exists. +func (f *Framework) EnsureIngress(ingress *extensions.Ingress) *extensions.Ingress { + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress) if err != nil { if k8sErrors.IsNotFound(err) { - return f.KubeClientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress) + ing, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress) + Expect(err).NotTo(HaveOccurred(), "unexpected error creating ingress") + return ing } - return nil, err + + Expect(err).NotTo(HaveOccurred()) } - return s, nil + + Expect(ing).NotTo(BeNil()) + + if ing.Annotations == nil { + ing.Annotations = make(map[string]string) + } + + return ing } -func (f *Framework) EnsureService(service *core.Service) (*core.Service, error) { +// EnsureService creates a Service object or returns it if it already exists. 
+func (f *Framework) EnsureService(service *core.Service) *core.Service {
 	s, err := f.KubeClientSet.CoreV1().Services(service.Namespace).Update(service)
 	if err != nil {
 		if k8sErrors.IsNotFound(err) {
-			return f.KubeClientSet.CoreV1().Services(service.Namespace).Create(service)
+			s, err := f.KubeClientSet.CoreV1().Services(service.Namespace).Create(service)
+			Expect(err).NotTo(HaveOccurred(), "unexpected error creating service")
+			return s
+
 		}
-		return nil, err
+
+		Expect(err).NotTo(HaveOccurred())
 	}
-	return s, nil
+
+	Expect(s).NotTo(BeNil(), "expected a service but none returned")
+
+	return s
 }
 
-func (f *Framework) EnsureDeployment(deployment *extensions.Deployment) (*extensions.Deployment, error) {
-	d, err := f.KubeClientSet.Extensions().Deployments(deployment.Namespace).Update(deployment)
+// EnsureDeployment creates a Deployment object or returns it if it already exists.
+func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+	d, err := f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Update(deployment)
 	if err != nil {
 		if k8sErrors.IsNotFound(err) {
-			return f.KubeClientSet.Extensions().Deployments(deployment.Namespace).Create(deployment)
+			return f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Create(deployment)
 		}
 		return nil, err
 	}
 	return d, nil
 }
 
-func (f *Framework) WaitForPodsReady(timeout time.Duration, expectedReplicas int, opts metav1.ListOptions) error {
-	return wait.Poll(time.Second, timeout, func() (bool, error) {
-		pl, err := f.KubeClientSet.CoreV1().Pods(f.Namespace.Name).List(opts)
+// WaitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
+func WaitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
+	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
+		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(opts)
 		if err != nil {
-			return false, err
+			return false, nil
 		}
 
 		r := 0
 		for _, p := range pl.Items {
-			if p.Status.Phase != core.PodRunning {
-				continue
+			if isRunning, _ := podRunningReady(&p); isRunning {
+				r++
 			}
-			r++
 		}
 
 		if r == expectedReplicas {
@@ -93,3 +142,77 @@ func (f *Framework) WaitForPodsReady(timeout time.Duration, expectedReplicas int
 		return false, nil
 	})
 }
+
+// WaitForEndpoints waits for a given amount of time until an Endpoints object contains the expected number of addresses.
+func WaitForEndpoints(kubeClientSet kubernetes.Interface, timeout time.Duration, name, ns string, expectedEndpoints int) error {
+	if expectedEndpoints == 0 {
+		return nil
+	}
+	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
+		endpoint, err := kubeClientSet.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
+		if k8sErrors.IsNotFound(err) {
+			return false, nil
+		}
+		Expect(err).NotTo(HaveOccurred())
+		if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
+			return false, nil
+		}
+
+		r := 0
+		for _, es := range endpoint.Subsets {
+			r += len(es.Addresses)
+		}
+
+		if r == expectedEndpoints {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
+
+// podRunningReady checks whether pod p's phase is running and it has a ready
+// condition of status true.
+func podRunningReady(p *core.Pod) (bool, error) {
+	// Check the phase is running.
+	if p.Status.Phase != core.PodRunning {
+		return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
+			p.ObjectMeta.Name, p.Spec.NodeName, core.PodRunning, p.Status.Phase)
+	}
+	// Check the ready condition is true. 
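+	// podutil.IsPodReady (from k8s.io/kubernetes/pkg/api/v1/pod) scans
+	// p.Status.Conditions for a PodReady condition with Status ConditionTrue,
+	// which is why the error below reports the full condition list.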
+	if !podutil.IsPodReady(p) {
+		return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
+			p.ObjectMeta.Name, p.Spec.NodeName, core.PodReady, core.ConditionTrue, p.Status.Conditions)
+	}
+	return true, nil
+}
+
+func getIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
+	l, err := kubeClientSet.CoreV1().Pods(ns).List(metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if len(l.Items) == 0 {
+		return nil, fmt.Errorf("there are no ingress-nginx pods running in namespace %v", ns)
+	}
+
+	var pod *core.Pod
+
+	for _, p := range l.Items {
+		if strings.HasPrefix(p.GetName(), "nginx-ingress-controller") {
+			if isRunning, err := podRunningReady(&p); err == nil && isRunning {
+				pod = &p
+				break
+			}
+		}
+	}
+
+	if pod == nil {
+		return nil, fmt.Errorf("there are no ingress-nginx pods running in namespace %v", ns)
+	}
+
+	return pod, nil
+}
diff --git a/test/e2e/framework/logs.go b/test/e2e/framework/logs.go
index 19aef7865a..21a93e569e 100644
--- a/test/e2e/framework/logs.go
+++ b/test/e2e/framework/logs.go
@@ -24,7 +24,8 @@ import (
 	"k8s.io/api/core/v1"
 )
 
-func (f *Framework) Logs(pod *v1.Pod) (string, error) {
+// Logs returns the log entries of a given Pod.
+func Logs(pod *v1.Pod) (string, error) {
 	var (
 		execOut bytes.Buffer
 		execErr bytes.Buffer
@@ -34,14 +35,13 @@ func (f *Framework) Logs(pod *v1.Pod) (string, error) {
 		return "", fmt.Errorf("could not determine which container to use")
 	}
 
-	args := fmt.Sprintf("kubectl logs -n %v %v", pod.Namespace, pod.Name)
-	cmd := exec.Command("/bin/bash", "-c", args)
+	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v logs --namespace %s %s", KubectlPath, pod.Namespace, pod.Name))
 	cmd.Stdout = &execOut
 	cmd.Stderr = &execErr
 
 	err := cmd.Run()
 	if err != nil {
-		return "", fmt.Errorf("could not execute: %v", err)
+		return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err)
 	}
 
 	if execErr.Len() > 0 {
diff --git a/test/e2e/framework/ssl.go b/test/e2e/framework/ssl.go
index 41f0dd0eb1..35fcb755eb 100644
--- a/test/e2e/framework/ssl.go
+++ b/test/e2e/framework/ssl.go
@@ -1,9 +1,26 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package framework
 
 import (
 	"bytes"
 	"crypto/rand"
 	"crypto/rsa"
+	"crypto/tls"
 	"crypto/x509"
 	"crypto/x509/pkix"
 	"encoding/pem"
@@ -11,11 +28,15 @@ import (
 	"io"
 	"math/big"
 	"net"
+	net_url "net/url"
 	"strings"
 	"time"
 
-	"k8s.io/api/core/v1"
+	. "github.com/onsi/gomega"
+
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 )
 
@@ -24,45 +45,114 @@ const (
 	validFor = 365 * 24 * time.Hour
 )
 
-// CreateIngressTLSSecret creates a secret containing TLS certificates for the given Ingress.
-// If a secret with the same name already pathExists in the namespace of the
-// Ingress, it's updated. 
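+// The reworked CreateIngressTLSSecret below returns a ready-to-use
+// *tls.Config instead of the raw host/cert/key tuple. A caller sketch,
+// mirroring how the dynamic-certificates specs in this change use it:
+//
+//	tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet,
+//		ing.Spec.TLS[0].Hosts, ing.Spec.TLS[0].SecretName, ing.Namespace)
+//	Expect(err).ToNot(HaveOccurred())
+//	framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig)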
-func CreateIngressTLSSecret(client kubernetes.Interface, hosts []string, secreName, namespace string) (host string, rootCA, privKey []byte, err error) { - var k, c bytes.Buffer - host = strings.Join(hosts, ",") - if err = generateRSACerts(host, true, &k, &c); err != nil { - return - } - cert := c.Bytes() - key := k.Bytes() - secret := &v1.Secret{ +// CreateIngressTLSSecret creates or updates a Secret containing a TLS +// certificate for the given Ingress and returns a TLS configuration suitable +// for HTTP clients to use against that particular Ingress. +func CreateIngressTLSSecret(client kubernetes.Interface, hosts []string, secretName, namespace string) (*tls.Config, error) { + if len(hosts) == 0 { + return nil, fmt.Errorf("require a non-empty host for client hello") + } + + var serverKey, serverCert bytes.Buffer + var data map[string][]byte + host := strings.Join(hosts, ",") + + if err := generateRSACert(host, true, &serverKey, &serverCert); err != nil { + return nil, err + } + + data = map[string][]byte{ + v1.TLSCertKey: serverCert.Bytes(), + v1.TLSPrivateKeyKey: serverKey.Bytes(), + } + + newSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: secreName, - }, - Data: map[string][]byte{ - v1.TLSCertKey: cert, - v1.TLSPrivateKeyKey: key, + Name: secretName, }, + Data: data, } - var s *v1.Secret - if s, err = client.CoreV1().Secrets(namespace).Get(secreName, metav1.GetOptions{}); err == nil { - s.Data = secret.Data - _, err = client.CoreV1().Secrets(namespace).Update(s) + + var apierr error + curSecret, err := client.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) + if err == nil && curSecret != nil { + curSecret.Data = newSecret.Data + _, apierr = client.CoreV1().Secrets(namespace).Update(curSecret) } else { - _, err = client.CoreV1().Secrets(namespace).Create(secret) + _, apierr = client.CoreV1().Secrets(namespace).Create(newSecret) } - return host, cert, key, err + if apierr != nil { + return nil, apierr + } + + serverName := hosts[0] + return tlsConfig(serverName, serverCert.Bytes()) } -// generateRSACerts generates a basic self signed certificate using a key length -// of rsaBits, valid for validFor time. -func generateRSACerts(host string, isCA bool, keyOut, certOut io.Writer) error { +// CreateIngressMASecret creates or updates a Secret containing a Mutual Auth +// certificate-chain for the given Ingress and returns a TLS configuration suitable +// for HTTP clients to use against that particular Ingress. 
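+// A hedged usage sketch (the host doubling as the secret name is only
+// illustrative; any test-owned names work):
+//
+//	tlsConfig, err := framework.CreateIngressMASecret(f.KubeClientSet, host, host, f.Namespace)
+//	Expect(err).ToNot(HaveOccurred())
+//
+// The returned config carries the generated client key pair, so it can be
+// handed to an http.Transport (or gorequest's TLSClientConfig) to satisfy
+// the ingress' client-certificate verification.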
+func CreateIngressMASecret(client kubernetes.Interface, host string, secretName, namespace string) (*tls.Config, error) { if len(host) == 0 { - return fmt.Errorf("Require a non-empty host for client hello") + return nil, fmt.Errorf("requires a non-empty host") + } + + var caCert, serverKey, serverCert, clientKey, clientCert bytes.Buffer + var data map[string][]byte + + if err := generateRSAMutualAuthCerts(host, &caCert, &serverKey, &serverCert, &clientKey, &clientCert); err != nil { + return nil, err } + + data = map[string][]byte{ + v1.TLSCertKey: serverCert.Bytes(), + v1.TLSPrivateKeyKey: serverKey.Bytes(), + "ca.crt": caCert.Bytes(), + } + + newSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + Data: data, + } + + var apierr error + curSecret, err := client.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) + if err == nil && curSecret != nil { + curSecret.Data = newSecret.Data + _, apierr = client.CoreV1().Secrets(namespace).Update(curSecret) + } else { + _, apierr = client.CoreV1().Secrets(namespace).Create(newSecret) + } + if apierr != nil { + return nil, apierr + } + + clientPair, err := tls.X509KeyPair(clientCert.Bytes(), clientKey.Bytes()) + if err != nil { + return nil, err + } + + return &tls.Config{ + ServerName: host, + Certificates: []tls.Certificate{clientPair}, + InsecureSkipVerify: true, + }, nil +} + +// WaitForTLS waits until the TLS handshake with a given server completes successfully. +func WaitForTLS(url string, tlsConfig *tls.Config) { + err := wait.Poll(Poll, DefaultTimeout, matchTLSServerName(url, tlsConfig)) + Expect(err).NotTo(HaveOccurred(), "timeout waiting for TLS configuration in URL %s", url) +} + +// generateRSACert generates a basic self signed certificate using a key length +// of rsaBits, valid for validFor time. +func generateRSACert(host string, isCA bool, keyOut, certOut io.Writer) error { priv, err := rsa.GenerateKey(rand.Reader, rsaBits) if err != nil { - return fmt.Errorf("Failed to generate key: %v", err) + return fmt.Errorf("failed to generate key: %v", err) } notBefore := time.Now() notAfter := notBefore.Add(validFor) @@ -103,13 +193,169 @@ func generateRSACerts(host string, isCA bool, keyOut, certOut io.Writer) error { derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) if err != nil { - return fmt.Errorf("Failed to create certificate: %s", err) + return fmt.Errorf("failed to create certificate: %s", err) } if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - return fmt.Errorf("Failed creating cert: %v", err) + return fmt.Errorf("failed creating cert: %v", err) } if err := pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { - return fmt.Errorf("Failed creating keay: %v", err) + return fmt.Errorf("failed creating key: %v", err) + } + + return nil +} + +// generateRSAMutualAuthCerts generates a complete basic self-signed certificate-chain (ca, server, client) using a +// key-length of rsaBits, valid for validFor time. 
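+// The writers receive, in order: the CA certificate, the server key and
+// certificate signed by that CA, and a client key and certificate signed by
+// the same CA, all PEM-encoded. CreateIngressMASecret above drives it with
+// bytes.Buffers:
+//
+//	var caCert, serverKey, serverCert, clientKey, clientCert bytes.Buffer
+//	err := generateRSAMutualAuthCerts(host, &caCert, &serverKey, &serverCert, &clientKey, &clientCert)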
+func generateRSAMutualAuthCerts(host string, caCertOut, serverKeyOut, serverCertOut, clientKeyOut, clientCertOut io.Writer) error { + notBefore := time.Now() + notAfter := notBefore.Add(validFor) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return fmt.Errorf("failed to generate serial number: %s", err) + } + + // Generate the CA key and CA cert + caKey, err := rsa.GenerateKey(rand.Reader, rsaBits) + if err != nil { + return fmt.Errorf("failed to generate key: %v", err) + } + + caTemplate := x509.Certificate{ + Subject: pkix.Name{ + CommonName: host + "-ca", + Organization: []string{"Acme Co"}, + }, + SerialNumber: serialNumber, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } + + caTemplate.IsCA = true + caTemplate.KeyUsage |= x509.KeyUsageCertSign + + caBytes, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) + if err != nil { + return fmt.Errorf("failed to create certificate: %s", err) + } + if err := pem.Encode(caCertOut, &pem.Block{Type: "CERTIFICATE", Bytes: caBytes}); err != nil { + return fmt.Errorf("failed creating cert: %v", err) + } + + // Generate the Server Key and CSR for the server + serverKey, err := rsa.GenerateKey(rand.Reader, rsaBits) + if err != nil { + return fmt.Errorf("failed to generate key: %v", err) + } + + // Create the server cert and sign with the csr + serialNumber, err = rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return fmt.Errorf("failed to generate serial number: %s", err) + } + + serverTemplate := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"Acme Co"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + } + + serverBytes, err := x509.CreateCertificate(rand.Reader, &serverTemplate, &caTemplate, &serverKey.PublicKey, caKey) + if err != nil { + return fmt.Errorf("failed to create certificate: %s", err) + } + if err := pem.Encode(serverCertOut, &pem.Block{Type: "CERTIFICATE", Bytes: serverBytes}); err != nil { + return fmt.Errorf("failed creating cert: %v", err) + } + if err := pem.Encode(serverKeyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(serverKey)}); err != nil { + return fmt.Errorf("failed creating key: %v", err) + } + + // Create the client key and certificate + clientKey, err := rsa.GenerateKey(rand.Reader, rsaBits) + if err != nil { + return fmt.Errorf("failed to generate key: %v", err) } + + serialNumber, err = rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return fmt.Errorf("failed to generate serial number: %s", err) + } + clientTemplate := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: host + "-client", + Organization: []string{"Acme Co"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + } + + clientBytes, err := x509.CreateCertificate(rand.Reader, &clientTemplate, &caTemplate, 
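+		// like the server certificate above, the client certificate is
+		// signed with the CA template and CA key generated at the top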
&clientKey.PublicKey, caKey)
+	if err != nil {
+		return fmt.Errorf("failed to create certificate: %s", err)
+	}
+	if err := pem.Encode(clientCertOut, &pem.Block{Type: "CERTIFICATE", Bytes: clientBytes}); err != nil {
+		return fmt.Errorf("failed creating cert: %v", err)
+	}
+	if err := pem.Encode(clientKeyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey)}); err != nil {
+		return fmt.Errorf("failed creating key: %v", err)
+	}
+
+	return nil
+}
+
+// tlsConfig returns a client TLS configuration for the given server name and
+// CA certificate (PEM).
+func tlsConfig(serverName string, pemCA []byte) (*tls.Config, error) {
+	rootCAPool := x509.NewCertPool()
+	if !rootCAPool.AppendCertsFromPEM(pemCA) {
+		return nil, fmt.Errorf("error creating CA certificate pool (%s)", serverName)
+	}
+	return &tls.Config{
+		ServerName: serverName,
+		RootCAs:    rootCAPool,
+	}, nil
+}
+
+// matchTLSServerName connects to the network address corresponding to the
+// given URL using the given TLS configuration and returns whether the TLS
+// handshake completed successfully.
+func matchTLSServerName(url string, tlsConfig *tls.Config) wait.ConditionFunc {
+	return func() (bool, error) {
+		u, err := net_url.Parse(url)
+		if err != nil {
+			return false, err
+		}
+
+		port := u.Port()
+		if port == "" {
+			port = "443"
+		}
+
+		conn, err := tls.Dial("tcp", fmt.Sprintf("%v:%v", u.Host, port), tlsConfig)
+		if err != nil {
+			Logf("Unexpected TLS error: %v", err)
+			return false, nil
+		}
+		conn.Close()
+
+		return true, nil
+	}
+}
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 1d713cd0d0..d91540cfc6 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -18,26 +18,21 @@ package framework
 
 import (
 	"flag"
-	"os"
 
 	"github.com/onsi/ginkgo/config"
-
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-const (
-	RecommendedConfigPathEnvVar = "INGRESSNGINXCONFIG"
 )
 
+// TestContextType describes the client context to use in communications with the Kubernetes API.
 type TestContextType struct {
-	KubeHost    string
-	KubeConfig  string
+	KubeHost string
+	//KubeConfig string
 	KubeContext string
 }
 
+// TestContext is the global client context for tests.
 var TestContext TestContextType
 
-// Register flags common to all e2e test suites.
+// RegisterCommonFlags registers flags common to all e2e test suites.
 func RegisterCommonFlags() {
 	// Turn on verbose by default to get spec names
 	config.DefaultReporterConfig.Verbose = true
@@ -48,11 +43,16 @@ func RegisterCommonFlags() {
 	// Randomize specs as well as suites
 	config.GinkgoConfig.RandomizeAllSpecs = true
 
+	// Default SlowSpecThreshold is 5 seconds.
+	// Too low for the kind of operations we need to test
+	config.DefaultReporterConfig.SlowSpecThreshold = 20
+
 	flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "http://127.0.0.1:8080", "The kubernetes host, or apiserver, to connect to")
-	flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar)
-	flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kuberentes. If unset, will use value from 'current-context'")
+	//flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. 
Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar) + flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kubernetes. If unset, will use value from 'current-context'") } +// RegisterParseFlags registers and parses flags for the test binary. func RegisterParseFlags() { RegisterCommonFlags() flag.Parse() diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index cbd7f5038d..2b7025d964 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -23,23 +23,23 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/ingress-nginx/internal/file" ) const ( // Poll how often to poll for conditions - Poll = 2 * time.Second + Poll = 3 * time.Second - // Default time to wait for operations to complete - defaultTimeout = 30 * time.Second + // DefaultTimeout time to wait for operations to complete + DefaultTimeout = 3 * time.Minute ) func nowStamp() string { @@ -50,26 +50,30 @@ func log(level string, format string, args ...interface{}) { fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) } +// Logf logs to the INFO logs. func Logf(format string, args ...interface{}) { log("INFO", format, args...) } +// Failf logs to the INFO logs and fails the test. func Failf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) Fail(nowStamp()+": "+msg, 1) } +// Skipf logs to the INFO logs and skips the test. func Skipf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) Skip(nowStamp() + ": " + msg) } +// RestclientConfig deserializes the contents of a kubeconfig file into a Config object. func RestclientConfig(config, context string) (*api.Config, error) { Logf(">>> config: %s\n", config) if config == "" { - return nil, fmt.Errorf("Config file must be specified to load client config") + return nil, fmt.Errorf("config file must be specified to load client config") } c, err := clientcmd.LoadFromFile(config) if err != nil { @@ -82,30 +86,22 @@ func RestclientConfig(config, context string) (*api.Config, error) { return c, nil } -type ClientConfigGetter func() (*rest.Config, error) - -func LoadConfig(config, context string) (*rest.Config, error) { - c, err := RestclientConfig(config, context) - if err != nil { - return nil, err - } - return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig() -} - // RunID unique identifier of the e2e run var RunID = uuid.NewUUID() // CreateKubeNamespace creates a new namespace in the cluster -func CreateKubeNamespace(baseName string, c kubernetes.Interface) (*v1.Namespace, error) { +func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error) { + ts := time.Now().UnixNano() ns := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName), + GenerateName: fmt.Sprintf("e2e-tests-%v-%v-", baseName, ts), }, } // Be robust about making the namespace creation call. 
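+	// wait.PollImmediate runs the condition once immediately and then every
+	// Poll (3s) until DefaultTimeout (3m) elapses; returning (false, nil)
+	// schedules another attempt, while a non-nil error aborts the poll early.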
var got *v1.Namespace
-	err := wait.PollImmediate(Poll, defaultTimeout, func() (bool, error) {
-		var err error
+	var err error
+
+	err = wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) {
 		got, err = c.CoreV1().Namespaces().Create(ns)
 		if err != nil {
 			Logf("Unexpected error while creating namespace: %v", err)
@@ -115,9 +111,9 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (*v1.Namespace
 		return true, nil
 	})
 	if err != nil {
-		return nil, err
+		return "", err
 	}
-	return got, nil
+	return got.Name, nil
 }
 
 // DeleteKubeNamespace deletes a namespace and all the objects inside
@@ -125,6 +121,7 @@ func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
 	return c.CoreV1().Namespaces().Delete(namespace, metav1.NewDeleteOptions(0))
 }
 
+// ExpectNoError tests whether an error occurred.
 func ExpectNoError(err error, explain ...interface{}) {
 	if err != nil {
 		Logf("Unexpected error occurred: %v", err)
@@ -134,7 +131,7 @@ func ExpectNoError(err error, explain ...interface{}) {
 
 // WaitForKubeNamespaceNotExist waits until a namespaces is not present in the cluster
 func WaitForKubeNamespaceNotExist(c kubernetes.Interface, namespace string) error {
-	return wait.PollImmediate(Poll, time.Minute*2, namespaceNotExist(c, namespace))
+	return wait.PollImmediate(Poll, DefaultTimeout, namespaceNotExist(c, namespace))
 }
 
 func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionFunc {
@@ -152,7 +149,7 @@ func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionF
 
 // WaitForNoPodsInNamespace waits until there are no pods running in a namespace
 func WaitForNoPodsInNamespace(c kubernetes.Interface, namespace string) error {
-	return wait.PollImmediate(Poll, time.Minute*2, noPodsInNamespace(c, namespace))
+	return wait.PollImmediate(Poll, DefaultTimeout, noPodsInNamespace(c, namespace))
 }
 
 func noPodsInNamespace(c kubernetes.Interface, namespace string) wait.ConditionFunc {
@@ -178,23 +175,23 @@ func WaitForPodRunningInNamespace(c kubernetes.Interface, pod *v1.Pod) error {
 	if pod.Status.Phase == v1.PodRunning {
 		return nil
 	}
-	return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, defaultTimeout)
+	return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, DefaultTimeout)
 }
 
 func waitTimeoutForPodRunningInNamespace(c kubernetes.Interface, podName, namespace string, timeout time.Duration) error {
-	return wait.PollImmediate(Poll, defaultTimeout, podRunning(c, podName, namespace))
+	return wait.PollImmediate(Poll, DefaultTimeout, podRunning(c, podName, namespace))
 }
 
 // WaitForSecretInNamespace waits a default amount of time for the specified secret is present in a particular namespace
 func WaitForSecretInNamespace(c kubernetes.Interface, namespace, name string) error {
-	return wait.PollImmediate(1*time.Second, time.Minute*2, secretInNamespace(c, namespace, name))
+	return wait.PollImmediate(Poll, DefaultTimeout, secretInNamespace(c, namespace, name))
 }
 
 func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
 	return func() (bool, error) {
 		s, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
 		if apierrors.IsNotFound(err) {
-			return false, err
+			return false, nil
 		}
 		if err != nil {
 			return false, err
@@ -207,9 +204,33 @@ func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.Cond
 	}
 }
 
+// WaitForFileInFS waits a default amount of time until the specified file is present in the filesystem
+func WaitForFileInFS(file string, fs file.Filesystem) 
error { + return wait.PollImmediate(Poll, DefaultTimeout, fileInFS(file, fs)) +} + +func fileInFS(file string, fs file.Filesystem) wait.ConditionFunc { + return func() (bool, error) { + stat, err := fs.Stat(file) + if err != nil { + return false, err + } + + if stat == nil { + return false, fmt.Errorf("file %v does not exist", file) + } + + if stat.Size() > 0 { + return true, nil + } + + return false, fmt.Errorf("the file %v exists but it is empty", file) + } +} + // WaitForNoIngressInNamespace waits until there is no ingress object in a particular namespace func WaitForNoIngressInNamespace(c kubernetes.Interface, namespace, name string) error { - return wait.PollImmediate(1*time.Second, time.Minute*2, noIngressInNamespace(c, namespace, name)) + return wait.PollImmediate(Poll, DefaultTimeout, noIngressInNamespace(c, namespace, name)) } func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc { @@ -231,14 +252,14 @@ func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.C // WaitForIngressInNamespace waits until a particular ingress object exists namespace func WaitForIngressInNamespace(c kubernetes.Interface, namespace, name string) error { - return wait.PollImmediate(1*time.Second, time.Minute*2, ingressInNamespace(c, namespace, name)) + return wait.PollImmediate(Poll, DefaultTimeout, ingressInNamespace(c, namespace, name)) } func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc { return func() (bool, error) { ing, err := c.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - return false, err + return false, nil } if err != nil { return false, err @@ -255,7 +276,7 @@ func podRunning(c kubernetes.Interface, podName, namespace string) wait.Conditio return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { - return false, err + return false, nil } switch pod.Status.Phase { case v1.PodRunning: diff --git a/test/e2e/gracefulshutdown/slow_requests.go b/test/e2e/gracefulshutdown/slow_requests.go new file mode 100644 index 0000000000..793881b96f --- /dev/null +++ b/test/e2e/gracefulshutdown/slow_requests.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gracefulshutdown + +import ( + "net/http" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Graceful Shutdown - Slow Requests", func() { + f := framework.NewDefaultFramework("shutdown-slow-requests") + + BeforeEach(func() { + f.NewSlowEchoDeployment() + f.UpdateNginxConfigMapData("worker-shutdown-timeout", "50s") + }) + + It("should let slow requests finish before shutting down", func() { + host := "graceful-shutdown" + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "slowecho", 8080, nil)) + f.WaitForNginxConfiguration( + func(conf string) bool { + return strings.Contains(conf, "worker_shutdown_timeout") + }) + + done := make(chan bool) + go func() { + defer func() { done <- true }() + defer GinkgoRecover() + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/sleep/30"). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }() + + time.Sleep(1 * time.Second) + f.DeleteNGINXPod(60) + <-done + }) +}) diff --git a/test/e2e/kind.yaml b/test/e2e/kind.yaml new file mode 100644 index 0000000000..f7c166ac22 --- /dev/null +++ b/test/e2e/kind.yaml @@ -0,0 +1,6 @@ +kind: Config +apiVersion: kind.sigs.k8s.io/v1alpha2 +nodes: + - role: control-plane + - role: worker + replicas: 2 diff --git a/test/e2e/loadbalance/configmap.go b/test/e2e/loadbalance/configmap.go new file mode 100644 index 0000000000..abd4c500a3 --- /dev/null +++ b/test/e2e/loadbalance/configmap.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalance + +import ( + "encoding/json" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +const ( + waitForLuaSync = 5 * time.Second +) + +var _ = framework.IngressNginxDescribe("Load Balance - Configmap value", func() { + f := framework.NewDefaultFramework("lb-configmap") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("should apply the configmap load-balance setting", func() { + host := "load-balance.com" + + f.UpdateNginxConfigMapData("load-balance", "ewma") + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "server_name load-balance.com") + }) + time.Sleep(waitForLuaSync) + + getCmd := "/dbg backends all" + output, err := f.ExecIngressPod(getCmd) + Expect(err).Should(BeNil()) + + var backends []map[string]interface{} + unmarshalErr := json.Unmarshal([]byte(output), &backends) + Expect(unmarshalErr).Should(BeNil()) + + for _, backend := range backends { + if backend["name"].(string) != "upstream-default-backend" { + lb, ok := backend["load-balance"].(string) + Expect(ok).Should(Equal(true)) + Expect(lb).Should(Equal("ewma")) + } + } + }) +}) diff --git a/test/e2e/loadbalance/round_robin.go b/test/e2e/loadbalance/round_robin.go new file mode 100644 index 0000000000..5b16f2ad91 --- /dev/null +++ b/test/e2e/loadbalance/round_robin.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalance + +import ( + "regexp" + "strings" + + "github.com/parnurzeal/gorequest" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Load Balance - Round Robin", func() { + f := framework.NewDefaultFramework("round-robin") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(3) + f.UpdateNginxConfigMapData("worker-processes", "1") + }) + + AfterEach(func() { + f.UpdateNginxConfigMapData("worker-processes", "") + }) + + It("should evenly distribute requests with round-robin (default algorithm)", func() { + host := "load-balance.com" + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "server_name load-balance.com") + }) + + re, _ := regexp.Compile(`http-svc.*`) + replicaRequestCount := map[string]int{} + + for i := 0; i < 600; i++ { + _, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
+ End() + Expect(errs).Should(BeEmpty()) + + replica := re.FindString(body) + Expect(replica).ShouldNot(Equal("")) + + if _, ok := replicaRequestCount[replica]; !ok { + replicaRequestCount[replica] = 1 + } else { + replicaRequestCount[replica]++ + } + } + + for _, v := range replicaRequestCount { + Expect(v).Should(Equal(200)) + } + }) +}) diff --git a/test/e2e/lua/dynamic_certificates.go b/test/e2e/lua/dynamic_certificates.go new file mode 100644 index 0000000000..bdf4c09b97 --- /dev/null +++ b/test/e2e/lua/dynamic_certificates.go @@ -0,0 +1,229 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lua + +import ( + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { + f := framework.NewDefaultFramework("dynamic-certificate") + host := "foo.com" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + It("picks up the certificate when we add TLS spec to existing ingress", func() { + ensureIngress(f, host, "http-svc") + + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + ing.Spec.TLS = []extensions.IngressTLS{ + { + Hosts: []string{host}, + SecretName: host, + }, + } + _, err = framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).ToNot(HaveOccurred()) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) + }) + + It("picks up the previously missing secret for a given ingress without reloading", func() { + ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + time.Sleep(waitForLuaSync) + + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, "ingress.local") + + _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).ToNot(HaveOccurred()) + + By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate") + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "ssl_certificate_by_lua_block") && + !strings.Contains(server, fmt.Sprintf("ssl_certificate /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + !strings.Contains(server, fmt.Sprintf("ssl_certificate_key /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + strings.Contains(server, "listen 443") + }) + + time.Sleep(waitForLuaSync) + + By("serving the configured certificate on HTTPS endpoint") + 
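// ensureHTTPSRequest (defined with the dynamic-configuration helpers) asserts
+			// a 200 response and that the first DNS name of the presented leaf
+			// certificate equals the expected value; that is how these specs tell
+			// the dynamically loaded certificate apart from the default one.
+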
ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) + + log, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(log).ToNot(BeEmpty()) + index := strings.Index(log, "id=dummy_log_splitter_foo_bar") + restOfLogs := log[index:] + + By("skipping Nginx reload") + Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess)) + }) + + Context("given an ingress with TLS correctly configured", func() { + BeforeEach(func() { + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + + time.Sleep(waitForLuaSync) + + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, "ingress.local") + + _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate") + f.WaitForNginxServer(ing.Spec.TLS[0].Hosts[0], + func(server string) bool { + return strings.Contains(server, "ssl_certificate_by_lua_block") && + !strings.Contains(server, fmt.Sprintf("ssl_certificate /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + !strings.Contains(server, fmt.Sprintf("ssl_certificate_key /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + strings.Contains(server, "listen 443") + }) + + time.Sleep(waitForLuaSync) + + By("serving the configured certificate on HTTPS endpoint") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) + }) + + It("picks up the updated certificate without reloading", func() { + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host) + + _, err = framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate") + f.WaitForNginxServer(ing.Spec.TLS[0].Hosts[0], + func(server string) bool { + return strings.Contains(server, "ssl_certificate_by_lua_block") && + !strings.Contains(server, fmt.Sprintf("ssl_certificate /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + !strings.Contains(server, fmt.Sprintf("ssl_certificate_key /etc/ingress-controller/ssl/%s-%s.pem;", ing.Namespace, host)) && + strings.Contains(server, "listen 443") + }) + + time.Sleep(waitForLuaSync) + + By("serving the configured certificate on HTTPS endpoint") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) + + log, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(log).ToNot(BeEmpty()) + index := strings.Index(log, "id=dummy_log_splitter_foo_bar") + restOfLogs := log[index:] + + By("skipping Nginx reload") + Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess)) + }) + + It("falls back to using default certificate when secret gets deleted without reloading", func() { + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host) + + 
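// Note: the Delete call's return value is dropped here; the Expect that
+			// follows re-checks the err from the Get above.
+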
f.KubeClientSet.CoreV1().Secrets(ing.Namespace).Delete(ing.Spec.TLS[0].SecretName, nil) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate") + f.WaitForNginxServer(ing.Spec.TLS[0].Hosts[0], + func(server string) bool { + return strings.Contains(server, "ssl_certificate_by_lua_block") && + strings.Contains(server, "ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;") && + strings.Contains(server, "ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;") && + strings.Contains(server, "listen 443") + }) + + time.Sleep(waitForLuaSync) + + By("serving the default certificate on HTTPS endpoint") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, "ingress.local") + + log, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(log).ToNot(BeEmpty()) + index := strings.Index(log, "id=dummy_log_splitter_foo_bar") + restOfLogs := log[index:] + + By("skipping Nginx reload") + Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess)) + }) + + It("picks up a non-certificate only change", func() { + newHost := "foo2.com" + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ing.Spec.Rules[0].Host = newHost + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + By("serving the configured certificate on HTTPS endpoint") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), newHost, "ingress.local") + }) + + It("removes HTTPS configuration when we delete TLS spec", func() { + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ing.Spec.TLS = []extensions.IngressTLS{} + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + ensureRequest(f, host) + }) + }) +}) diff --git a/test/e2e/lua/dynamic_configuration.go b/test/e2e/lua/dynamic_configuration.go new file mode 100644 index 0000000000..a5eb34d53f --- /dev/null +++ b/test/e2e/lua/dynamic_configuration.go @@ -0,0 +1,296 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lua + +import ( + "crypto/tls" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/ingress-nginx/internal/nginx" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +const ( + logDynamicConfigSuccess = "Dynamic reconfiguration succeeded" + logDynamicConfigFailure = "Dynamic reconfiguration failed" + logRequireBackendReload = "Configuration changes detected, backend reload required" + logBackendReloadSuccess = "Backend successfully reloaded" + logInitialConfigSync = "Initial synchronization of the NGINX configuration" + waitForLuaSync = 5 * time.Second +) + +var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { + f := framework.NewDefaultFramework("dynamic-configuration") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + ensureIngress(f, "foo.com", "http-svc") + }) + + It("configures balancer Lua middleware correctly", func() { + f.WaitForNginxConfiguration(func(cfg string) bool { + return strings.Contains(cfg, "balancer.init_worker()") && strings.Contains(cfg, "balancer.balance()") + }) + + host := "foo.com" + f.WaitForNginxServer(host, func(server string) bool { + return strings.Contains(server, "balancer.rewrite()") && strings.Contains(server, "balancer.log()") + }) + }) + + It("sets nameservers for Lua", func() { + f.WaitForNginxConfiguration(func(cfg string) bool { + r := regexp.MustCompile(`configuration.nameservers = { [".,0-9a-zA-Z]+ }`) + return r.MatchString(cfg) + }) + }) + + Context("when only backends change", func() { + It("handles endpoints only changes", func() { + var nginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + nginxConfig = cfg + return true + }) + + replicas := 2 + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync) + + ensureRequest(f, "foo.com") + + var newNginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + newNginxConfig = cfg + return true + }) + Expect(nginxConfig).Should(Equal(newNginxConfig)) + }) + + It("handles endpoints only changes (down scaling of replicas)", func() { + var nginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + nginxConfig = cfg + return true + }) + + replicas := 2 + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + ensureRequest(f, "foo.com") + + var newNginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + newNginxConfig = cfg + return true + }) + Expect(nginxConfig).Should(Equal(newNginxConfig)) + + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", 0, nil) + + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + ensureRequestWithStatus(f, "foo.com", 503) + }) + + It("handles endpoints only changes consistently (down scaling of replicas vs. 
empty service)", func() { + deploymentName := "scalingecho" + f.NewEchoDeploymentWithNameAndReplicas(deploymentName, 0) + createIngress(f, "scaling.foo.com", deploymentName) + originalResponseCode := runRequest(f, "scaling.foo.com") + + replicas := 2 + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + expectedSuccessResponseCode := runRequest(f, "scaling.foo.com") + + replicas = 0 + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + expectedFailureResponseCode := runRequest(f, "scaling.foo.com") + + Expect(originalResponseCode).To(Equal(503), "Expected empty service to return 503 response.") + Expect(expectedFailureResponseCode).To(Equal(503), "Expected downscaled replicaset to return 503 response.") + Expect(expectedSuccessResponseCode).To(Equal(200), "Expected intermediate scaled replicaset to return a 200 response.") + }) + + It("handles an annotation change", func() { + var nginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + nginxConfig = cfg + return true + }) + + ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get("foo.com", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/load-balance"] = "round_robin" + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingress) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + ensureRequest(f, "foo.com") + + var newNginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + newNginxConfig = cfg + return true + }) + + Expect(nginxConfig).Should(Equal(newNginxConfig)) + }) + }) + + It("handles a non backend update", func() { + var nginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + nginxConfig = cfg + return true + }) + + ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get("foo.com", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + ingress.Spec.TLS = []extensions.IngressTLS{ + { + Hosts: []string{"foo.com"}, + SecretName: "foo.com", + }, + } + _, err = framework.CreateIngressTLSSecret(f.KubeClientSet, + ingress.Spec.TLS[0].Hosts, + ingress.Spec.TLS[0].SecretName, + ingress.Namespace) + Expect(err).ToNot(HaveOccurred()) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingress) + Expect(err).ToNot(HaveOccurred()) + + var newNginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + newNginxConfig = cfg + return true + }) + Expect(nginxConfig).ShouldNot(Equal(newNginxConfig)) + }) + + It("sets controllerPodsCount in Lua general configuration", func() { + // https://github.com/curl/curl/issues/936 + curlCmd := fmt.Sprintf("curl --fail --silent --unix-socket %v http://localhost/configuration/general", nginx.StatusSocket) + + output, err := f.ExecIngressPod(curlCmd) + Expect(err).ToNot(HaveOccurred()) + Expect(output).Should(Equal(`{"controllerPodsCount":1}`)) + + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(waitForLuaSync) + + output, err = f.ExecIngressPod(curlCmd) + Expect(err).ToNot(HaveOccurred()) + Expect(output).Should(Equal(`{"controllerPodsCount":3}`)) + }) +}) + +func ensureIngress(f *framework.Framework, host string, 
deploymentName string) *extensions.Ingress { + ing := createIngress(f, host, deploymentName) + time.Sleep(waitForLuaSync) + ensureRequest(f, host) + + return ing +} + +func createIngress(f *framework.Framework, host string, deploymentName string) *extensions.Ingress { + ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, deploymentName, 80, + &map[string]string{"nginx.ingress.kubernetes.io/load-balance": "ewma"})) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) && + strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + return ing +} + +func ensureRequest(f *framework.Framework, host string) { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) +} + +func ensureRequestWithStatus(f *framework.Framework, host string, statusCode int) { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(statusCode)) +} + +func runRequest(f *framework.Framework, host string) int { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + return resp.StatusCode +} + +func ensureHTTPSRequest(url string, host string, expectedDNSName string) { + resp, _, errs := gorequest.New(). + Get(url). + Set("Host", host). + TLSClientConfig(&tls.Config{ + InsecureSkipVerify: true, + ServerName: host, + }). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(len(resp.TLS.PeerCertificates)).Should(BeNumerically("==", 1)) + Expect(resp.TLS.PeerCertificates[0].DNSNames[0]).Should(Equal(expectedDNSName)) +} + +func getCookie(name string, cookies []*http.Cookie) (*http.Cookie, error) { + for _, cookie := range cookies { + if cookie.Name == name { + return cookie, nil + } + } + return &http.Cookie{}, fmt.Errorf("Cookie does not exist") +} diff --git a/test/e2e/run.sh b/test/e2e/run.sh new file mode 100755 index 0000000000..580a4b084f --- /dev/null +++ b/test/e2e/run.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KIND_LOG_LEVEL="info" + +if ! 
[ -z "${DEBUG:-}" ]; then + set -x + KIND_LOG_LEVEL="debug" +fi + +set -o errexit +set -o nounset +set -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +export TAG=dev +export ARCH=amd64 +export REGISTRY=ingress-controller + +export K8S_VERSION=${K8S_VERSION:-v1.14.1} + +KIND_CLUSTER_NAME="ingress-nginx-dev" + +kind --version || { echo "Please install kind before running e2e tests"; exit 1; } + +echo "[dev-env] creating Kubernetes cluster with kind" +# TODO: replace the custom images after https://github.com/kubernetes-sigs/kind/issues/531 +kind create cluster \ + --loglevel=${KIND_LOG_LEVEL} \ + --name ${KIND_CLUSTER_NAME} \ + --config ${DIR}/kind.yaml \ + --image "aledbf/kind-node:${K8S_VERSION}" + +export KUBECONFIG="$(kind get kubeconfig-path --name="${KIND_CLUSTER_NAME}")" + +echo "Kubernetes cluster:" +kubectl get nodes -o wide + +kubectl config set-context kubernetes-admin@${KIND_CLUSTER_NAME} + +echo "[dev-env] building container" +make -C ${DIR}/../../ build container +make -C ${DIR}/../../ e2e-test-image + +echo "[dev-env] copying docker images to cluster..." +kind load docker-image --name="${KIND_CLUSTER_NAME}" nginx-ingress-controller:e2e +kind load docker-image --name="${KIND_CLUSTER_NAME}" ${REGISTRY}/nginx-ingress-controller:${TAG} + +echo "[dev-env] running e2e tests..." +make -C ${DIR}/../../ e2e-test + +kind delete cluster \ + --loglevel=${KIND_LOG_LEVEL} \ + --name ${KIND_CLUSTER_NAME} diff --git a/test/e2e/servicebackend/service_backend.go b/test/e2e/servicebackend/service_backend.go new file mode 100644 index 0000000000..3829104bbc --- /dev/null +++ b/test/e2e/servicebackend/service_backend.go @@ -0,0 +1,163 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servicebackend + +import ( + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/parnurzeal/gorequest" + + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Service backend - 503", func() { + f := framework.NewDefaultFramework("service-backend") + + BeforeEach(func() { + }) + + AfterEach(func() { + }) + + It("should return 503 when backend service does not exist", func() { + host := "nonexistent.svc.com" + + bi := buildIngressWithNonexistentService(host, f.Namespace, "/") + f.EnsureIngress(bi) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
+ End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(503)) + }) + + It("should return 503 when all backend service endpoints are unavailable", func() { + host := "unavailable.svc.com" + + bi, bs := buildIngressWithUnavailableServiceEndpoints(host, f.Namespace, "/") + + svc := f.EnsureService(bs) + Expect(svc).NotTo(BeNil()) + + f.EnsureIngress(bi) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(503)) + }) + +}) + +func buildIngressWithNonexistentService(host, namespace, path string) *extensions.Ingress { + backendService := "nonexistent-svc" + return &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: namespace, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: backendService, + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func buildIngressWithUnavailableServiceEndpoints(host, namespace, path string) (*extensions.Ingress, *corev1.Service) { + backendService := "unavailable-svc" + return &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: namespace, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: backendService, + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: backendService, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{ + { + Name: "tcp", + Port: 80, + TargetPort: intstr.FromInt(80), + Protocol: "TCP", + }, + }, + Selector: map[string]string{ + "app": backendService, + }, + }, + } +} diff --git a/test/e2e/servicebackend/service_externalname.go b/test/e2e/servicebackend/service_externalname.go new file mode 100644 index 0000000000..7ae65616d5 --- /dev/null +++ b/test/e2e/servicebackend/service_externalname.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servicebackend + +import ( + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/parnurzeal/gorequest" + + core "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Service Type ExternalName", func() { + f := framework.NewDefaultFramework("type-externalname") + + BeforeEach(func() { + }) + + AfterEach(func() { + }) + + It("should return 200 for service type=ExternalName without a port defined", func() { + host := "echo" + + svc := &core.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpbin", + Namespace: f.Namespace, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "httpbin.org", + Type: corev1.ServiceTypeExternalName, + }, + } + + f.EnsureService(svc) + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "httpbin", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/get"). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(200)) + }) + + It("should return 200 for service type=ExternalName with a port defined", func() { + host := "echo" + + svc := &core.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpbin", + Namespace: f.Namespace, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "httpbin.org", + Type: corev1.ServiceTypeExternalName, + Ports: []corev1.ServicePort{ + { + Name: host, + Port: 80, + TargetPort: intstr.FromInt(80), + Protocol: "TCP", + }, + }, + }, + } + f.EnsureService(svc) + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "httpbin", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/get"). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(200)) + }) + + It("should return status 503 for service type=ExternalName with an invalid host", func() { + host := "echo" + + svc := &core.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpbin", + Namespace: f.Namespace, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "invalid.hostname", + Type: corev1.ServiceTypeExternalName, + }, + } + + f.EnsureService(svc) + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "httpbin", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://upstream_balancer;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/get"). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(503)) + }) +}) diff --git a/test/e2e/settings/configmap_change.go b/test/e2e/settings/configmap_change.go new file mode 100644 index 0000000000..bacb90c215 --- /dev/null +++ b/test/e2e/settings/configmap_change.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "regexp" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Configmap change", func() { + f := framework.NewDefaultFramework("configmap-change") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should reload after an update in the configuration", func() { + host := "configmap-change" + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + wlKey := "whitelist-source-range" + wlValue := "1.1.1.1" + + By("adding a whitelist-source-range") + + f.UpdateNginxConfigMapData(wlKey, wlValue) + + checksumRegex := regexp.MustCompile("Configuration checksum:\\s+(\\d+)") + checksum := "" + + f.WaitForNginxConfiguration( + func(cfg string) bool { + // before returning, extract the current checksum + match := checksumRegex.FindStringSubmatch(cfg) + if len(match) > 0 { + checksum = match[1] + } + + return strings.Contains(cfg, "allow 1.1.1.1;") + }) + Expect(checksum).NotTo(BeEmpty()) + + By("changing error-log-level") + + f.UpdateNginxConfigMapData("error-log-level", "debug") + + newChecksum := "" + f.WaitForNginxConfiguration( + func(cfg string) bool { + match := checksumRegex.FindStringSubmatch(cfg) + if len(match) > 0 { + newChecksum = match[1] + } + + return strings.Contains(cfg, "error_log /var/log/nginx/error.log debug;") // Contains, not ContainsAny: match the whole directive as a substring + }) + Expect(checksum).NotTo(BeEquivalentTo(newChecksum)) + }) +}) diff --git a/test/e2e/settings/default_ssl_certificate.go b/test/e2e/settings/default_ssl_certificate.go new file mode 100644 index 0000000000..01e5b32f56 --- /dev/null +++ b/test/e2e/settings/default_ssl_certificate.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "crypto/tls" + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("default-ssl-certificate", func() { + f := framework.NewDefaultFramework("default-ssl-certificate") + var tlsConfig *tls.Config + secretName := "my-custom-cert" + service := "http-svc" + port := 80 + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + + var err error + tlsConfig, err = framework.CreateIngressTLSSecret(f.KubeClientSet, + []string{"*"}, + secretName, + f.Namespace) + Expect(err).NotTo(HaveOccurred()) + + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, + func(deployment *appsv1.Deployment) error { + args := deployment.Spec.Template.Spec.Containers[0].Args + args = append(args, "--default-ssl-certificate=$(POD_NAMESPACE)/"+secretName) + deployment.Spec.Template.Spec.Containers[0].Args = args + _, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment) + + return err + }) + + // this asserts that it configures default custom ssl certificate without an ingress at all + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + }) + + It("uses default ssl certificate for catch-all ingress", func() { + ing := framework.NewSingleCatchAllIngress("catch-all", f.Namespace, service, port, nil) + f.EnsureIngress(ing) + + By("making sure new ingress is deployed") + expectedConfig := fmt.Sprintf("set $proxy_upstream_name \"%v-%v-%v\";", f.Namespace, service, port) + f.WaitForNginxServer("_", func(cfg string) bool { + return strings.Contains(cfg, expectedConfig) + }) + + By("making sure new ingress is responding") + + By("making sure the configured default ssl certificate is being used") + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + }) + + It("uses default ssl certificate for host based ingress when configured certificate does not match host", func() { + host := "foo" + + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, service, port, nil)) + _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + []string{"not.foo"}, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).NotTo(HaveOccurred()) + + By("making sure new ingress is deployed") + expectedConfig := fmt.Sprintf("set $proxy_upstream_name \"%v-%v-%v\";", f.Namespace, service, port) + f.WaitForNginxServer(host, func(cfg string) bool { + return strings.Contains(cfg, expectedConfig) + }) + + By("making sure the configured default ssl certificate is being used") + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + }) +}) diff --git a/test/e2e/settings/disable_catch_all.go b/test/e2e/settings/disable_catch_all.go new file mode 100644 index 0000000000..a24623142d --- /dev/null +++ b/test/e2e/settings/disable_catch_all.go @@ -0,0 +1,132 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/parnurzeal/gorequest" + appsv1 "k8s.io/api/apps/v1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/util/intstr" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { + f := framework.NewDefaultFramework("disabled-catch-all") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, + func(deployment *appsv1.Deployment) error { + args := deployment.Spec.Template.Spec.Containers[0].Args + args = append(args, "--disable-catch-all=true") + deployment.Spec.Template.Spec.Containers[0].Args = args + _, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment) + + return err + }) + }) + + AfterEach(func() { + }) + + It("should ignore catch all Ingress", func() { + host := "foo" + + ing := framework.NewSingleCatchAllIngress("catch-all", f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + ing = framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(cfg string) bool { + return strings.Contains(cfg, "server_name foo") + }) + + f.WaitForNginxServer("_", func(cfg string) bool { + return strings.Contains(cfg, `set $ingress_name ""`) && + strings.Contains(cfg, `set $proxy_upstream_name "upstream-default-backend"`) + }) + }) + + It("should delete Ingress updated to catch-all", func() { + host := "foo" + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "server_name foo") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.Spec.Rules = nil + ingress.Spec.Backend = &extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + } + return nil + }) + Expect(err).ToNot(HaveOccurred()) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return !strings.Contains(cfg, "server_name foo") + }) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + }) + + It("should allow Ingress with both a default backend and rules", func() { + host := "foo" + + ing := framework.NewSingleIngressWithBackendAndRules("not-catch-all", "/rulepath", host, f.Namespace, "http-svc", 80, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(cfg string) bool { + return strings.Contains(cfg, "server_name foo") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + }) +}) diff --git a/test/e2e/settings/forwarded_headers.go b/test/e2e/settings/forwarded_headers.go new file mode 100644 index 0000000000..3a4832c6c9 --- /dev/null +++ b/test/e2e/settings/forwarded_headers.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "fmt" + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("X-Forwarded headers", func() { + f := framework.NewDefaultFramework("forwarded-headers") + + setting := "use-forwarded-headers" + + BeforeEach(func() { + f.NewEchoDeployment() + f.UpdateNginxConfigMapData(setting, "false") + }) + + AfterEach(func() { + }) + + It("should trust X-Forwarded headers when setting is true", func() { + host := "forwarded-headers" + + f.UpdateNginxConfigMapData(setting, "true") + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "server_name forwarded-headers") + }) + + By("ensuring single values are parsed correctly") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("X-Forwarded-Port", "1234"). + Set("X-Forwarded-Proto", "myproto"). + Set("X-Forwarded-For", "1.2.3.4"). + Set("X-Forwarded-Host", "myhost"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=myhost"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-host=myhost"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-proto=myproto"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-port=1234"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-for=1.2.3.4"))) + + By("ensuring that first entry in X-Forwarded-Host is used as the best host") + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("X-Forwarded-Port", "1234"). + Set("X-Forwarded-Proto", "myproto"). + Set("X-Forwarded-For", "1.2.3.4"). + Set("X-Forwarded-Host", "myhost.com, another.host,example.net"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=myhost.com"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-host=myhost.com"))) + + }) + It("should not trust X-Forwarded headers when setting is false", func() { + host := "forwarded-headers" + + f.UpdateNginxConfigMapData(setting, "false") + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "server_name forwarded-headers") + }) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("X-Forwarded-Port", "1234"). + Set("X-Forwarded-Proto", "myproto"). + Set("X-Forwarded-For", "1.2.3.4"). + Set("X-Forwarded-Host", "myhost"). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=forwarded-headers"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-port=80"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-proto=http"))) + Expect(body).Should(ContainSubstring(fmt.Sprintf("x-original-forwarded-for=1.2.3.4"))) + Expect(body).ShouldNot(ContainSubstring(fmt.Sprintf("host=myhost"))) + Expect(body).ShouldNot(ContainSubstring(fmt.Sprintf("x-forwarded-host=myhost"))) + Expect(body).ShouldNot(ContainSubstring(fmt.Sprintf("x-forwarded-proto=myproto"))) + Expect(body).ShouldNot(ContainSubstring(fmt.Sprintf("x-forwarded-port=1234"))) + Expect(body).ShouldNot(ContainSubstring(fmt.Sprintf("x-forwarded-for=1.2.3.4"))) + }) +}) diff --git a/test/e2e/settings/geoip2.go b/test/e2e/settings/geoip2.go new file mode 100644 index 0000000000..6c464c78de --- /dev/null +++ b/test/e2e/settings/geoip2.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "strings" + + "net/http" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Geoip2", func() { + f := framework.NewDefaultFramework("geoip2") + + host := "geoip2" + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + It("should only allow requests from specific countries", func() { + f.UpdateNginxConfigMapData("use-geoip2", "true") + + httpSnippetAllowingOnlyAustralia := + `map $geoip2_city_country_code $blocked_country { + default 1; + AU 0; +}` + f.UpdateNginxConfigMapData("http-snippet", httpSnippetAllowingOnlyAustralia) + f.UpdateNginxConfigMapData("use-forwarded-headers", "true") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "map $geoip2_city_country_code $blocked_country") + }) + + configSnippet := + `if ($blocked_country) { + return 403; +}` + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/configuration-snippet": configSnippet, + } + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)) + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "if ($blocked_country)") + }) + + // Should be blocked + usIP := "8.8.8.8" + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("X-Forwarded-For", usIP). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + // Shouldn't be blocked + australianIP := "1.1.1.1" + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("X-Forwarded-For", australianIP). 
+ End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) +}) diff --git a/test/e2e/settings/global_access_block.go b/test/e2e/settings/global_access_block.go new file mode 100644 index 0000000000..3e1abb94fc --- /dev/null +++ b/test/e2e/settings/global_access_block.go @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Global access block", func() { + f := framework.NewDefaultFramework("global-access-block") + + host := "global-access-block" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) + }) + + AfterEach(func() { + }) + + It("should block CIDRs defined in the ConfigMap", func() { + f.UpdateNginxConfigMapData("block-cidrs", "172.16.0.0/12,192.168.0.0/16,10.0.0.0/8") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "deny 172.16.0.0/12;") && + strings.Contains(cfg, "deny 192.168.0.0/16;") && + strings.Contains(cfg, "deny 10.0.0.0/8;") + }) + + // This test works for minikube, but may have problems with real kubernetes clusters, + // especially if connection is done via Internet. In this case, the test should be disabled/removed. + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + }) + + It("should block User-Agents defined in the ConfigMap", func() { + f.UpdateNginxConfigMapData("block-user-agents", "~*chrome\\/68\\.0\\.3440\\.106\\ safari\\/537\\.36,AlphaBot") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "~*chrome\\/68\\.0\\.3440\\.106\\ safari\\/537\\.36 1;") && + strings.Contains(cfg, "AlphaBot 1;") + }) + + // Should be blocked + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("User-Agent", "AlphaBot"). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + // Shouldn't be blocked + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("User-Agent", "Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1"). 
+ End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should block Referers defined in the ConfigMap", func() { + f.UpdateNginxConfigMapData("block-referers", "~*example\\.com,qwerty") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "~*example\\.com 1;") && + strings.Contains(cfg, "qwerty 1;") + }) + + // Should be blocked + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("Referer", "example.com"). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("Referer", "qwerty"). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + // Shouldn't be blocked + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("Referer", "qwerty123"). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) +}) diff --git a/test/e2e/settings/global_external_auth.go b/test/e2e/settings/global_external_auth.go new file mode 100755 index 0000000000..c3916e869c --- /dev/null +++ b/test/e2e/settings/global_external_auth.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "fmt" + "net/http" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Global External Auth", func() { + f := framework.NewDefaultFramework("global-external-auth") + + host := "global-external-auth" + + echoServiceName := "http-svc" + + globalExternalAuthURLSetting := "global-auth-url" + + fooPath := "/foo" + barPath := "/bar" + + noAuthSetting := "no-auth-locations" + noAuthLocations := barPath + + enableGlobalExternalAuthAnnotation := "nginx.ingress.kubernetes.io/enable-global-auth" + + BeforeEach(func() { + f.NewEchoDeployment() + f.NewHttpbinDeployment() + }) + + AfterEach(func() { + }) + + Context("when global external authentication is configured", func() { + + BeforeEach(func() { + globalExternalAuthURL := fmt.Sprintf("http://httpbin.%s.svc.cluster.local:80/status/401", f.Namespace) + + By("Adding an ingress rule for /foo") + fooIng := framework.NewSingleIngress("foo-ingress", fooPath, host, f.Namespace, echoServiceName, 80, nil) + f.EnsureIngress(fooIng) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("location /foo")) + }) + + By("Adding an ingress rule for /bar") + barIng := framework.NewSingleIngress("bar-ingress", barPath, host, f.Namespace, echoServiceName, 80, nil) + f.EnsureIngress(barIng) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("location /bar")) + }) + + By("Adding a global-auth-url to configMap") + f.UpdateNginxConfigMapData(globalExternalAuthURLSetting, globalExternalAuthURL) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(globalExternalAuthURL)) + }) + }) + + It("should return status code 401 when request any protected service", func() { + + By("Sending a request to protected service /foo") + fooResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+fooPath). + Set("Host", host). + End() + Expect(fooResp.StatusCode).Should(Equal(http.StatusUnauthorized)) + + By("Sending a request to protected service /bar") + barResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+barPath). + Set("Host", host). + End() + Expect(barResp.StatusCode).Should(Equal(http.StatusUnauthorized)) + }) + + It("should return status code 200 when request whitelisted (via no-auth-locations) service and 401 when request protected service", func() { + + By("Adding a no-auth-locations for /bar to configMap") + f.UpdateNginxConfigMapData(noAuthSetting, noAuthLocations) + + By("Sending a request to protected service /foo") + fooResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+fooPath). + Set("Host", host). + End() + Expect(fooResp.StatusCode).Should(Equal(http.StatusUnauthorized)) + + By("Sending a request to whitelisted service /bar") + barResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+barPath). + Set("Host", host). 
+ End() + Expect(barResp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should return status code 200 when requesting a whitelisted (via ingress annotation) service and 401 when requesting a protected service", func() { + + By("Adding an ingress rule for /bar with annotation enable-global-auth = false") + annotations := map[string]string{ + enableGlobalExternalAuthAnnotation: "false", + } + barIng := framework.NewSingleIngress("bar-ingress", barPath, host, f.Namespace, echoServiceName, 80, &annotations) + f.EnsureIngress(barIng) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("location /bar")) + }) + + By("Sending a request to protected service /foo") + fooResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+fooPath). + Set("Host", host). + End() + Expect(fooResp.StatusCode).Should(Equal(http.StatusUnauthorized)) + + By("Sending a request to whitelisted service /bar") + barResp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)+barPath). + Set("Host", host). + End() + Expect(barResp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It(`should set proxy_method when global-auth-method is configured`, func() { + + globalExternalAuthMethodSetting := "global-auth-method" + globalExternalAuthMethod := "GET" + + By("Adding a global-auth-method to configMap") + f.UpdateNginxConfigMapData(globalExternalAuthMethodSetting, globalExternalAuthMethod) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("proxy_method")) + }) + }) + + It(`should add custom error page when global-auth-signin url is configured`, func() { + + globalExternalAuthSigninSetting := "global-auth-signin" + globalExternalAuthSignin := "http://foo.com/global-error-page" + + By("Adding a global-auth-signin to configMap") + f.UpdateNginxConfigMapData(globalExternalAuthSigninSetting, globalExternalAuthSignin) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("error_page 401 = ")) + }) + }) + + It(`should add auth headers when global-auth-response-headers is configured`, func() { + + globalExternalAuthResponseHeadersSetting := "global-auth-response-headers" + globalExternalAuthResponseHeaders := "Foo, Bar" + + By("Adding a global-auth-response-headers to configMap") + f.UpdateNginxConfigMapData(globalExternalAuthResponseHeadersSetting, globalExternalAuthResponseHeaders) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("auth_request_set $authHeader0 $upstream_http_foo;")) && + Expect(server).Should(ContainSubstring("auth_request_set $authHeader1 $upstream_http_bar;")) + }) + }) + + It(`should set request-redirect when global-auth-request-redirect is configured`, func() { + + globalExternalAuthRequestRedirectSetting := "global-auth-request-redirect" + globalExternalAuthRequestRedirect := "Foo-Redirect" + + By("Adding a global-auth-request-redirect to configMap") + f.UpdateNginxConfigMapData(globalExternalAuthRequestRedirectSetting, globalExternalAuthRequestRedirect) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(globalExternalAuthRequestRedirect)) + }) + }) + + It(`should set snippet when global external auth is configured`, func() { + + globalExternalAuthSnippetSetting := "global-auth-snippet" + globalExternalAuthSnippet := "proxy_set_header My-Custom-Header 42;" + + By("Adding a global-auth-snippet to configMap") + 
f.UpdateNginxConfigMapData(globalExternalAuthSnippetSetting, globalExternalAuthSnippet) + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(globalExternalAuthSnippet)) + }) + }) + + }) + +}) diff --git a/test/e2e/settings/ingress_class.go b/test/e2e/settings/ingress_class.go new file mode 100644 index 0000000000..73e26c9309 --- /dev/null +++ b/test/e2e/settings/ingress_class.go @@ -0,0 +1,160 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + appsv1 "k8s.io/api/apps/v1" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Ingress class", func() { + f := framework.NewDefaultFramework("ingress-class") + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + Context("Without a specific ingress-class", func() { + + It("should ignore Ingress with class", func() { + invalidHost := "foo" + annotations := map[string]string{ + "kubernetes.io/ingress.class": "testclass", + } + ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + validHost := "bar" + ing = framework.NewSingleIngress(validHost, "/", validHost, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return !strings.Contains(cfg, "server_name foo") && + strings.Contains(cfg, "server_name bar") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", invalidHost). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", validHost). 
+ End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + }) + + Context("With a specific ingress-class", func() { + BeforeEach(func() { + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, + func(deployment *appsv1.Deployment) error { + args := deployment.Spec.Template.Spec.Containers[0].Args + args = append(args, "--ingress-class=testclass") + deployment.Spec.Template.Spec.Containers[0].Args = args + _, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment) + + return err + }) + }) + + It("should ignore Ingress with no class", func() { + invalidHost := "bar" + + ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.Namespace, "http-svc", 80, nil) + f.EnsureIngress(ing) + + validHost := "foo" + annotations := map[string]string{ + "kubernetes.io/ingress.class": "testclass", + } + ing = framework.NewSingleIngress(validHost, "/", validHost, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(validHost, func(cfg string) bool { + return strings.Contains(cfg, "server_name foo") + }) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return !strings.Contains(cfg, "server_name bar") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", validHost). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", invalidHost). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + }) + + It("should delete Ingress when class is removed", func() { + host := "foo" + annotations := map[string]string{ + "kubernetes.io/ingress.class": "testclass", + } + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + ing = f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(cfg string) bool { + return strings.Contains(cfg, "server_name foo") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + delete(ing.Annotations, "kubernetes.io/ingress.class") + ing = f.EnsureIngress(ing) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return !strings.Contains(cfg, "server_name foo") + }) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + }) + }) + +}) diff --git a/test/e2e/settings/main_snippet.go b/test/e2e/settings/main_snippet.go new file mode 100644 index 0000000000..0a7d4e4cc0 --- /dev/null +++ b/test/e2e/settings/main_snippet.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "strings" + + . 
"github.com/onsi/ginkgo" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Main Snippet", func() { + f := framework.NewDefaultFramework("main-snippet") + mainSnippet := "main-snippet" + + It("should add value of main-snippet setting to nginx config", func() { + expectedComment := "# main snippet" + f.UpdateNginxConfigMapData(mainSnippet, expectedComment) + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, expectedComment) + }) + }) +}) diff --git a/test/e2e/settings/multi_accept.go b/test/e2e/settings/multi_accept.go new file mode 100644 index 0000000000..94d1349fe7 --- /dev/null +++ b/test/e2e/settings/multi_accept.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "strings" + + . "github.com/onsi/ginkgo" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Multi Accept", func() { + multiAccept := "enable-multi-accept" + f := framework.NewDefaultFramework(multiAccept) + + It("should be enabled by default", func() { + expectedDirective := "multi_accept on;" + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, expectedDirective) + }) + }) + + It("should be enabled when set to true", func() { + expectedDirective := "multi_accept on;" + f.UpdateNginxConfigMapData(multiAccept, "true") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, expectedDirective) + }) + }) + + It("should be disabled when set to false", func() { + expectedDirective := "multi_accept off;" + f.UpdateNginxConfigMapData(multiAccept, "false") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, expectedDirective) + }) + }) +}) diff --git a/test/e2e/settings/no_auth_locations.go b/test/e2e/settings/no_auth_locations.go new file mode 100644 index 0000000000..0612dd583d --- /dev/null +++ b/test/e2e/settings/no_auth_locations.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "fmt" + "net/http" + "os/exec" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("No Auth locations", func() { + f := framework.NewDefaultFramework("no-auth-locations") + + setting := "no-auth-locations" + username := "foo" + password := "bar" + secretName := "test-secret" + host := "no-auth-locations" + noAuthPath := "/noauth" + + BeforeEach(func() { + f.NewEchoDeployment() + + s := f.EnsureSecret(buildSecret(username, password, secretName, f.Namespace)) + + f.UpdateNginxConfigMapData(setting, noAuthPath) + + bi := buildBasicAuthIngressWithSecondPath(host, f.Namespace, s.Name, noAuthPath) + f.EnsureIngress(bi) + }) + + AfterEach(func() { + }) + + It("should return status code 401 when accessing '/' unauthentication", func() { + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("test auth")) + }) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusUnauthorized)) + Expect(body).Should(ContainSubstring("401 Authorization Required")) + }) + + It("should return status code 200 when accessing '/' authentication", func() { + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("test auth")) + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + SetBasicAuth(username, password). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + + It("should return status code 200 when accessing '/noauth' unauthenticated", func() { + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("test auth")) + }) + + resp, _, errs := gorequest.New(). + Get(fmt.Sprintf("%s/noauth", f.GetURL(framework.HTTP))). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) +}) + +func buildBasicAuthIngressWithSecondPath(host, namespace, secretName, pathName string) *extensions.Ingress { + return &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: namespace, + Annotations: map[string]string{"nginx.ingress.kubernetes.io/auth-type": "basic", + "nginx.ingress.kubernetes.io/auth-secret": secretName, + "nginx.ingress.kubernetes.io/auth-realm": "test auth", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/", + Backend: extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + { + Path: pathName, + Backend: extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func buildSecret(username, password, name, namespace string) *corev1.Secret { + out, err := exec.Command("openssl", "passwd", "-crypt", password).CombinedOutput() + Expect(err).NotTo(HaveOccurred(), "creating password") + + encpass := fmt.Sprintf("%v:%s\n", username, out) + + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionGracePeriodSeconds: framework.NewInt64(1), + }, + Data: map[string][]byte{ + "auth": []byte(encpass), + }, + Type: corev1.SecretTypeOpaque, + } +} diff --git a/test/e2e/settings/pod_security_policy.go b/test/e2e/settings/pod_security_policy.go new file mode 100644 index 0000000000..9c7c65d2ac --- /dev/null +++ b/test/e2e/settings/pod_security_policy.go @@ -0,0 +1,131 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "fmt" + "net/http" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +const ( + ingressControllerPSP = "ingress-controller-psp" +) + +var _ = framework.IngressNginxDescribe("Pod Security Policies", func() { + f := framework.NewDefaultFramework("pod-security-policies") + + BeforeEach(func() { + psp := createPodSecurityPolicy() + _, err := f.KubeClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp) + if !k8sErrors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred(), "creating Pod Security Policy") + } + + role, err := f.KubeClientSet.RbacV1().ClusterRoles().Get(fmt.Sprintf("nginx-ingress-clusterrole-%v", f.Namespace), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "getting ingress controller cluster role") + Expect(role).NotTo(BeNil()) + + role.Rules = append(role.Rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{ingressControllerPSP}, + Verbs: []string{"use"}, + }) + + _, err = f.KubeClientSet.RbacV1().ClusterRoles().Update(role) + Expect(err).NotTo(HaveOccurred(), "updating ingress controller cluster role to use a pod security policy") + + // update the deployment just to trigger a rolling update and the use of the security policy + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, + func(deployment *appsv1.Deployment) error { + args := deployment.Spec.Template.Spec.Containers[0].Args + args = append(args, "--v=2") + deployment.Spec.Template.Spec.Containers[0].Args = args + _, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment) + + return err + }) + Expect(err).NotTo(HaveOccurred()) + + f.NewEchoDeployment() + }) + + It("should be running with a Pod Security Policy", func() { + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "server_tokens on") + }) + + resp, _, _ := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", "foo.bar.com"). + End() + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + }) +}) + +func createPodSecurityPolicy() *extensions.PodSecurityPolicy { + trueValue := true + return &extensions.PodSecurityPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressControllerPSP, + }, + Spec: extensions.PodSecurityPolicySpec{ + AllowPrivilegeEscalation: &trueValue, + RequiredDropCapabilities: []corev1.Capability{"All"}, + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: "RunAsAny", + }, + SELinux: extensions.SELinuxStrategyOptions{ + Rule: "RunAsAny", + }, + FSGroup: extensions.FSGroupStrategyOptions{ + Ranges: []extensions.IDRange{ + { + Min: 1, + Max: 65535, + }, + }, + Rule: "MustRunAs", + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Ranges: []extensions.IDRange{ + { + Min: 1, + Max: 65535, + }, + }, + Rule: "MustRunAs", + }, + }, + } + +} diff --git a/test/e2e/settings/proxy_host.go b/test/e2e/settings/proxy_host.go new file mode 100644 index 0000000000..d9789e7fd0 --- /dev/null +++ b/test/e2e/settings/proxy_host.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "fmt" + "net/http" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Proxy host variable", func() { + test := "proxy-host" + f := framework.NewDefaultFramework(test) + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + It("should set a proxy_host", func() { + upstreamName := fmt.Sprintf("%v-http-svc-80", f.Namespace) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Custom-Header: $proxy_host"`, + } + f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.Namespace, "http-svc", 80, &annotations)) + + f.WaitForNginxConfiguration( + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %v", test)) && + strings.Contains(server, "set $proxy_host $proxy_upstream_name") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", test). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Custom-Header")).Should(Equal(upstreamName)) + }) + + It("should set a proxy_host using the upstream-vhost annotation value", func() { + upstreamName := fmt.Sprintf("%v-http-svc-80", f.Namespace) + upstreamVHost := "different.host" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": upstreamVHost, + "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Custom-Header: $proxy_host"`, + } + f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.Namespace, "http-svc", 80, &annotations)) + + f.WaitForNginxConfiguration( + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %v", test)) && + strings.Contains(server, "set $proxy_host $proxy_upstream_name") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", test). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Custom-Header")).Should(Equal(upstreamName)) + }) +}) diff --git a/test/e2e/settings/proxy_protocol.go b/test/e2e/settings/proxy_protocol.go index cde00fe209..95fdde595b 100644 --- a/test/e2e/settings/proxy_protocol.go +++ b/test/e2e/settings/proxy_protocol.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package setting +package settings import ( "fmt" @@ -25,19 +25,17 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" "k8s.io/ingress-nginx/test/e2e/framework" ) var _ = framework.IngressNginxDescribe("Proxy Protocol", func() { f := framework.NewDefaultFramework("proxy-protocol") + setting := "use-proxy-protocol" + BeforeEach(func() { - err := f.NewEchoDeployment() - Expect(err).NotTo(HaveOccurred()) + f.NewEchoDeployment() + f.UpdateNginxConfigMapData(setting, "false") }) AfterEach(func() { @@ -46,55 +44,20 @@ var _ = framework.IngressNginxDescribe("Proxy Protocol", func() { It("should respect port passed by the PROXY Protocol", func() { host := "proxy-protocol" - setting := "use-proxy-protocol" - updateConfigmap(setting, "true", f.KubeClientSet) - defer updateConfigmap(setting, "false", f.KubeClientSet) - - ing, err := f.EnsureIngress(&v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - Annotations: map[string]string{}, - }, - Spec: v1beta1.IngressSpec{ - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - }) - - Expect(err).NotTo(HaveOccurred()) - Expect(ing).NotTo(BeNil()) - - err = f.WaitForNginxServer(host, + f.UpdateNginxConfigMapData(setting, "true") + + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) + + f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, "server_name proxy-protocol") && strings.Contains(server, "listen 80 proxy_protocol") }) - Expect(err).NotTo(HaveOccurred()) - ip, err := f.GetNginxIP() - Expect(err).NotTo(HaveOccurred()) - port, err := f.GetNginxPort("http") - Expect(err).NotTo(HaveOccurred()) + ip := f.GetNginxIP() - conn, err := net.Dial("tcp", fmt.Sprintf("%v:%v", ip, port)) - Expect(err).NotTo(HaveOccurred()) + conn, err := net.Dial("tcp", net.JoinHostPort(ip, "80")) + Expect(err).NotTo(HaveOccurred(), "unexpected error creating connection to %s:80", ip) defer conn.Close() header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 1234\r\n" @@ -102,29 +65,10 @@ var _ = framework.IngressNginxDescribe("Proxy Protocol", func() { conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")) data, err := ioutil.ReadAll(conn) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "unexpected error reading connection data") body := string(data) Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", "proxy-protocol"))) Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-port=80"))) Expect(body).Should(ContainSubstring(fmt.Sprintf("x-forwarded-for=192.168.0.1"))) }) }) - -func updateConfigmap(k, v string, c kubernetes.Interface) { - By(fmt.Sprintf("updating configuration configmap setting %v to '%v'", k, v)) - config, err := c.CoreV1().ConfigMaps("ingress-nginx").Get("nginx-configuration", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(config).NotTo(BeNil()) - - if config.Data == nil { - config.Data = map[string]string{} - } - - if config.Data[k] == v { - return - } - - config.Data[k] = v - _, err = c.CoreV1().ConfigMaps("ingress-nginx").Update(config) - Expect(err).NotTo(HaveOccurred()) -} diff --git a/test/e2e/settings/server_tokens.go 
b/test/e2e/settings/server_tokens.go
new file mode 100644
index 0000000000..403506bf30
--- /dev/null
+++ b/test/e2e/settings/server_tokens.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package settings
+
+import (
+	"strings"
+
+	. "github.com/onsi/ginkgo"
+
+	extensions "k8s.io/api/extensions/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/ingress-nginx/test/e2e/framework"
+)
+
+var _ = framework.IngressNginxDescribe("Server Tokens", func() {
+	f := framework.NewDefaultFramework("server-tokens")
+	serverTokens := "server-tokens"
+
+	BeforeEach(func() {
+		f.NewEchoDeployment()
+	})
+
+	AfterEach(func() {
+	})
+
+	It("should not include the Server header in the response", func() {
+		f.UpdateNginxConfigMapData(serverTokens, "false")
+
+		f.EnsureIngress(framework.NewSingleIngress(serverTokens, "/", serverTokens, f.Namespace, "http-svc", 80, nil))
+
+		f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				return strings.Contains(cfg, "server_tokens off") &&
+					strings.Contains(cfg, "more_clear_headers Server;")
+			})
+	})
+
+	It("should include the Server header in the response when enabled", func() {
+		f.UpdateNginxConfigMapData(serverTokens, "true")
+
+		f.EnsureIngress(&extensions.Ingress{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        serverTokens,
+				Namespace:   f.Namespace,
+				Annotations: map[string]string{},
+			},
+			Spec: extensions.IngressSpec{
+				Rules: []extensions.IngressRule{
+					{
+						Host: serverTokens,
+						IngressRuleValue: extensions.IngressRuleValue{
+							HTTP: &extensions.HTTPIngressRuleValue{
+								Paths: []extensions.HTTPIngressPath{
+									{
+										Path: "/",
+										Backend: extensions.IngressBackend{
+											ServiceName: "http-svc",
+											ServicePort: intstr.FromInt(80),
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		})
+
+		f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				return strings.Contains(cfg, "server_tokens on")
+			})
+	})
+})
diff --git a/test/e2e/settings/tls.go b/test/e2e/settings/tls.go
new file mode 100644
index 0000000000..cbf1f0c5f0
--- /dev/null
+++ b/test/e2e/settings/tls.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package settings
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
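+	// dot imports bring the Ginkgo DSL and Gomega matchers into scope unqualified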
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +func noRedirectPolicyFunc(gorequest.Request, []gorequest.Request) error { + return http.ErrUseLastResponse +} + +var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { + f := framework.NewDefaultFramework("settings-tls") + host := "settings-tls" + + BeforeEach(func() { + f.NewEchoDeployment() + f.UpdateNginxConfigMapData("use-forwarded-headers", "false") + }) + + AfterEach(func() { + }) + + It("should configure TLS protocol", func() { + sslCiphers := "ssl-ciphers" + sslProtocols := "ssl-protocols" + + // Two ciphers supported by each of TLSv1.2 and TLSv1. + // https://www.openssl.org/docs/man1.1.0/apps/ciphers.html - "CIPHER SUITE NAMES" + testCiphers := "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA" + + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).NotTo(HaveOccurred()) + + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + + By("setting cipher suite") + f.UpdateNginxConfigMapData(sslCiphers, testCiphers) + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, fmt.Sprintf("ssl_ciphers '%s';", testCiphers)) + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTPS)). + TLSClientConfig(tlsConfig). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.TLS.Version).Should(BeNumerically("==", tls.VersionTLS12)) + Expect(resp.TLS.CipherSuite).Should(BeNumerically("==", tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384)) + + By("enforcing TLS v1.0") + f.UpdateNginxConfigMapData(sslProtocols, "TLSv1") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "ssl_protocols TLSv1;") + }) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTPS)). + TLSClientConfig(tlsConfig). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.TLS.Version).Should(BeNumerically("==", tls.VersionTLS10)) + Expect(resp.TLS.CipherSuite).Should(BeNumerically("==", tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA)) + }) + + It("should configure HSTS policy header", func() { + hstsMaxAge := "hsts-max-age" + hstsIncludeSubdomains := "hsts-include-subdomains" + hstsPreload := "hsts-preload" + + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).NotTo(HaveOccurred()) + + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + + By("setting max-age parameter") + f.UpdateNginxConfigMapData(hstsMaxAge, "86400") + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "Strict-Transport-Security: max-age=86400; includeSubDomains\"") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTPS)). + TLSClientConfig(tlsConfig). + Set("Host", host). 
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
+		Expect(resp.Header.Get("Strict-Transport-Security")).Should(ContainSubstring("max-age=86400"))
+
+		By("setting includeSubDomains parameter")
+		f.UpdateNginxConfigMapData(hstsIncludeSubdomains, "false")
+
+		f.WaitForNginxServer(host,
+			func(server string) bool {
+				return strings.Contains(server, "Strict-Transport-Security: max-age=86400\"")
+			})
+
+		resp, _, errs = gorequest.New().
+			Get(f.GetURL(framework.HTTPS)).
+			TLSClientConfig(tlsConfig).
+			Set("Host", host).
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
+		Expect(resp.Header.Get("Strict-Transport-Security")).ShouldNot(ContainSubstring("includeSubDomains"))
+
+		By("setting preload parameter")
+		f.UpdateNginxConfigMapData(hstsPreload, "true")
+
+		f.WaitForNginxServer(host,
+			func(server string) bool {
+				return strings.Contains(server, "Strict-Transport-Security: max-age=86400; preload\"")
+			})
+
+		resp, _, errs = gorequest.New().
+			Get(f.GetURL(framework.HTTPS)).
+			TLSClientConfig(tlsConfig).
+			Set("Host", host).
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
+		Expect(resp.Header.Get("Strict-Transport-Security")).Should(ContainSubstring("preload"))
+	})
+
+	It("should not use ports during the HTTP to HTTPS redirection", func() {
+		ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil))
+		tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet,
+			ing.Spec.TLS[0].Hosts,
+			ing.Spec.TLS[0].SecretName,
+			ing.Namespace)
+		Expect(err).NotTo(HaveOccurred())
+
+		framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig)
+
+		resp, _, errs := gorequest.New().
+			Get(f.GetURL(framework.HTTP)).
+			Retry(10, 1*time.Second, http.StatusNotFound).
+			RedirectPolicy(noRedirectPolicyFunc).
+			Set("Host", host).
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect))
+		Expect(resp.Header.Get("Location")).Should(Equal(fmt.Sprintf("https://%v/", host)))
+	})
+
+	It("should not use ports or X-Forwarded-Host during the HTTP to HTTPS redirection", func() {
+		f.UpdateNginxConfigMapData("use-forwarded-headers", "true")
+
+		ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil))
+		tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet,
+			ing.Spec.TLS[0].Hosts,
+			ing.Spec.TLS[0].SecretName,
+			ing.Namespace)
+		Expect(err).NotTo(HaveOccurred())
+
+		framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig)
+
+		resp, _, errs := gorequest.New().
+			Get(f.GetURL(framework.HTTP)).
+			Retry(10, 1*time.Second, http.StatusNotFound).
+			RedirectPolicy(noRedirectPolicyFunc).
+			Set("Host", host).
+			Set("X-Forwarded-Host", "example.com:80").
+			End()
+
+		Expect(errs).Should(BeEmpty())
+		Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect))
+		Expect(resp.Header.Get("Location")).Should(Equal("https://example.com/"))
+	})
+})
diff --git a/test/e2e/ssl/secret_update.go b/test/e2e/ssl/secret_update.go
index 27e28081a1..ed77c6b6cd 100644
--- a/test/e2e/ssl/secret_update.go
+++ b/test/e2e/ssl/secret_update.go
@@ -24,10 +24,8 @@ import (
 	. "github.com/onsi/ginkgo"
 	.
"github.com/onsi/gomega" - "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/test/e2e/framework" ) @@ -35,8 +33,7 @@ var _ = framework.IngressNginxDescribe("SSL", func() { f := framework.NewDefaultFramework("ssl") BeforeEach(func() { - err := f.NewEchoDeployment() - Expect(err).NotTo(HaveOccurred()) + f.NewEchoDeployment() }) AfterEach(func() { @@ -45,76 +42,39 @@ var _ = framework.IngressNginxDescribe("SSL", func() { It("should not appear references to secret updates not used in ingress rules", func() { host := "ssl-update" - dummySecret, err := f.EnsureSecret(&v1.Secret{ + dummySecret := f.EnsureSecret(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "dummy", - Namespace: f.Namespace.Name, + Namespace: f.Namespace, }, Data: map[string][]byte{ "key": []byte("value"), }, }) - Expect(err).NotTo(HaveOccurred()) - ing, err := f.EnsureIngress(&v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: host, - Namespace: f.Namespace.Name, - }, - Spec: v1beta1.IngressSpec{ - TLS: []v1beta1.IngressTLS{ - { - Hosts: []string{host}, - SecretName: host, - }, - }, - Rules: []v1beta1.IngressRule{ - { - Host: host, - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - { - Path: "/", - Backend: v1beta1.IngressBackend{ - ServiceName: "http-svc", - ServicePort: intstr.FromInt(80), - }, - }, - }, - }, - }, - }, - }, - }, - }) - - Expect(err).ToNot(HaveOccurred()) - Expect(ing).ToNot(BeNil()) - - _, _, _, err = framework.CreateIngressTLSSecret(f.KubeClientSet, + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, ing.Spec.TLS[0].SecretName, ing.Namespace) Expect(err).ToNot(HaveOccurred()) - err = f.WaitForNginxServer(host, + f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, "server_name ssl-update") && strings.Contains(server, "listen 443") }) - Expect(err).ToNot(HaveOccurred()) log, err := f.NginxLogs() Expect(err).ToNot(HaveOccurred()) Expect(log).ToNot(BeEmpty()) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace.Name))) - time.Sleep(30 * time.Second) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace))) + time.Sleep(5 * time.Second) dummySecret.Data["some-key"] = []byte("some value") - f.KubeClientSet.CoreV1().Secrets(f.Namespace.Name).Update(dummySecret) - time.Sleep(30 * time.Second) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace.Name))) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("error obtaining PEM from secret %v/dummy", f.Namespace.Name))) + f.KubeClientSet.CoreV1().Secrets(f.Namespace).Update(dummySecret) + time.Sleep(5 * time.Second) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace))) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("error obtaining PEM from secret %v/dummy", f.Namespace))) }) }) diff --git a/test/e2e/status/update.go b/test/e2e/status/update.go new file mode 100644 index 0000000000..a8dcb073ed --- /dev/null +++ b/test/e2e/status/update.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package settings
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	appsv1 "k8s.io/api/apps/v1"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	"k8s.io/ingress-nginx/test/e2e/framework"
+)
+
+var _ = framework.IngressNginxDescribe("Status Update [Status]", func() {
+	f := framework.NewDefaultFramework("status-update")
+	host := "status-update"
+	address := getHostIP()
+
+	BeforeEach(func() {
+	})
+
+	AfterEach(func() {
+	})
+
+	It("should update status field after client-go reconnection", func() {
+		port, cmd, err := f.KubectlProxy(0)
+		Expect(err).NotTo(HaveOccurred(), "unexpected error starting kubectl proxy")
+
+		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
+			func(deployment *appsv1.Deployment) error {
+				args := deployment.Spec.Template.Spec.Containers[0].Args
+				args = append(args, fmt.Sprintf("--apiserver-host=http://%s:%d", address.String(), port))
+				args = append(args, "--publish-status-address=1.1.0.0")
+				// flags --publish-service and --publish-status-address are mutually exclusive;
+				// start at -1 so args[0] is not blanked when --publish-service is absent
+				index := -1
+				for k, v := range args {
+					if strings.Contains(v, "--publish-service") {
+						index = k
+						break
+					}
+				}
+				if index > -1 {
+					args[index] = ""
+				}
+
+				deployment.Spec.Template.Spec.Containers[0].Args = args
+				_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment)
+				return err
+			})
+		Expect(err).NotTo(HaveOccurred(), "unexpected error updating ingress controller deployment flags")
+
+		f.NewEchoDeploymentWithReplicas(1)
+
+		ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil))
+
+		f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				return strings.Contains(cfg, fmt.Sprintf("server_name %s", host))
+			})
+
+		framework.Logf("waiting for leader election and initial status update")
+		time.Sleep(30 * time.Second)
+
+		err = cmd.Process.Kill()
+		Expect(err).NotTo(HaveOccurred(), "unexpected error terminating kubectl proxy")
+
+		ing, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred(), "unexpected error getting %s/%v Ingress", f.Namespace, host)
+
+		ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{}
+		_, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).UpdateStatus(ing)
+		Expect(err).NotTo(HaveOccurred(), "unexpected error cleaning Ingress status")
+		time.Sleep(10 * time.Second)
+
+		err = f.KubeClientSet.CoreV1().
+			ConfigMaps(f.Namespace).
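+			// removing the leader-election lock configmap forces a new election, and with it a fresh status sync, once the controller reconnects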
+ Delete("ingress-controller-leader-nginx", &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred(), "unexpected error deleting leader election configmap") + + _, cmd, err = f.KubectlProxy(port) + Expect(err).NotTo(HaveOccurred(), "unexpected error starting kubectl proxy") + defer func() { + if cmd != nil { + err := cmd.Process.Kill() + Expect(err).NotTo(HaveOccurred(), "unexpected error terminating kubectl proxy") + } + }() + + err = wait.Poll(10*time.Second, framework.DefaultTimeout, func() (done bool, err error) { + ing, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + if len(ing.Status.LoadBalancer.Ingress) != 1 { + return false, nil + } + + return true, nil + }) + Expect(err).NotTo(HaveOccurred(), "unexpected error waiting for ingress status") + Expect(ing.Status.LoadBalancer.Ingress).Should(Equal([]apiv1.LoadBalancerIngress{ + {IP: "1.1.0.0"}, + })) + }) +}) + +func getHostIP() net.IP { + conn, err := net.Dial("udp", "8.8.8.8:80") + if err != nil { + log.Fatal(err) + } + defer conn.Close() + + localAddr := conn.LocalAddr().(*net.UDPAddr) + + return localAddr.IP +} diff --git a/test/e2e/tcpudp/tcp.go b/test/e2e/tcpudp/tcp.go new file mode 100644 index 0000000000..8a9fa36b84 --- /dev/null +++ b/test/e2e/tcpudp/tcp.go @@ -0,0 +1,192 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/parnurzeal/gorequest" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +const ( + waitForLuaSync = 5 * time.Second +) + +var _ = framework.IngressNginxDescribe("TCP Feature", func() { + f := framework.NewDefaultFramework("tcp") + + BeforeEach(func() { + }) + + AfterEach(func() { + }) + + It("should expose a TCP service", func() { + f.NewEchoDeploymentWithReplicas(1) + + config, err := f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Get("tcp-services", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining tcp-services configmap") + Expect(config).NotTo(BeNil(), "expected a configmap but none returned") + + if config.Data == nil { + config.Data = map[string]string{} + } + + config.Data["8080"] = fmt.Sprintf("%v/http-svc:80", f.Namespace) + + _, err = f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Update(config) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating configmap") + + svc, err := f.KubeClientSet. + CoreV1(). + Services(f.Namespace). 
+ Get("ingress-nginx", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service") + Expect(svc).NotTo(BeNil(), "expected a service but none returned") + + svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{ + Name: "http-svc", + Port: 8080, + TargetPort: intstr.FromInt(8080), + }) + _, err = f.KubeClientSet. + CoreV1(). + Services(f.Namespace). + Update(svc) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating service") + + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-http-svc-80"`, f.Namespace)) + }) + + ip := f.GetNginxIP() + resp, _, errs := gorequest.New(). + Get(fmt.Sprintf("http://%v:8080", ip)). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(200)) + }) + + It("should expose an ExternalName TCP service", func() { + // Setup: + // - Create an external name service for DNS lookups on port 5353. Point it to google's DNS server + // - Expose port 5353 on the nginx ingress NodePort service to open a hole for this test + // - Update the `tcp-services` configmap to proxy traffic to the configured external name service + + // Create an external service for DNS + externalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dns-external-name-svc", + Namespace: f.Namespace, + }, + + Spec: corev1.ServiceSpec{ + ExternalName: "google-public-dns-a.google.com", + Ports: []corev1.ServicePort{ + { + Name: "dns-external-name-svc", + Port: 5353, + TargetPort: intstr.FromInt(53), + }, + }, + Type: corev1.ServiceTypeExternalName, + }, + } + f.EnsureService(externalService) + + // Expose the `external name` port on the `ingress-nginx` service + svc, err := f.KubeClientSet. + CoreV1(). + Services(f.Namespace). + Get("ingress-nginx", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service") + Expect(svc).NotTo(BeNil(), "expected a service but none returned") + + svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{ + Name: "dns-svc", + Port: 5353, + TargetPort: intstr.FromInt(5353), + }) + _, err = f.KubeClientSet. + CoreV1(). + Services(f.Namespace). + Update(svc) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating service") + + // Update the TCP configmap to link port 5353 to the DNS external name service + config, err := f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Get("tcp-services", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining tcp-services configmap") + Expect(config).NotTo(BeNil(), "expected a configmap but none returned") + + if config.Data == nil { + config.Data = map[string]string{} + } + + config.Data["5353"] = fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace) + + _, err = f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Update(config) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating configmap") + + time.Sleep(waitForLuaSync) + + // Validate that the generated nginx config contains the expected `proxy_upstream_name` value + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-dns-external-name-svc-5353"`, f.Namespace)) + }) + + // Execute the test. Use the `external name` service to resolve a domain name. 
+ ip := f.GetNginxIP() + resolver := net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{} + return d.DialContext(ctx, "tcp", fmt.Sprintf("%v:5353", ip)) + }, + } + ips, err := resolver.LookupHost(context.Background(), "google-public-dns-b.google.com") + Expect(err).NotTo(HaveOccurred(), "unexpected error from DNS resolver") + Expect(ips).Should(ContainElement("8.8.4.4")) + + }) +}) diff --git a/test/e2e/up.sh b/test/e2e/up.sh deleted file mode 100755 index e98b155d61..0000000000 --- a/test/e2e/up.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -export JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' - -echo "downloading kubectl..." -curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubectl && \ - chmod +x kubectl && sudo mv kubectl /usr/local/bin/ - -echo "downloading minikube..." -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && \ - chmod +x minikube && \ - sudo mv minikube /usr/local/bin/ - -echo "starting minikube..." -sudo minikube start --vm-driver=none --kubernetes-version=$KUBERNETES_VERSION - -minikube update-context - -echo "waiting for kubernetes cluster" -until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; -do - sleep 1; -done diff --git a/test/e2e/wait-for-nginx.sh b/test/e2e/wait-for-nginx.sh index 19e4d5ebb0..a9c6658bdd 100755 --- a/test/e2e/wait-for-nginx.sh +++ b/test/e2e/wait-for-nginx.sh @@ -14,48 +14,56 @@ # See the License for the specific language governing permissions and # limitations under the License. -export JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' - -echo "deploying NGINX Ingress controller" -cat deploy/namespace.yaml | kubectl apply -f - -cat deploy/default-backend.yaml | kubectl apply -f - -cat deploy/configmap.yaml | kubectl apply -f - -cat deploy/tcp-services-configmap.yaml | kubectl apply -f - -cat deploy/udp-services-configmap.yaml | kubectl apply -f - -cat deploy/without-rbac.yaml | kubectl apply -f - -cat deploy/provider/baremetal/service-nodeport.yaml | kubectl apply -f - - -echo "updating image..." -kubectl set image \ - deployments \ - --namespace ingress-nginx \ - --selector app=ingress-nginx \ - nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:test - -sleep 5 - -echo "waiting NGINX ingress pod..." - -function waitForPod() { - until kubectl get pods -n ingress-nginx -l app=ingress-nginx -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; - do - sleep 1; - done +set -e +if ! 
[ -z $DEBUG ]; then + set -x +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +export NAMESPACE=$1 + +echo "deploying NGINX Ingress controller in namespace $NAMESPACE" + +function on_exit { + local error_code="$?" + + test $error_code == 0 && return; + + echo "Obtaining ingress controller pod logs..." + kubectl logs -l app.kubernetes.io/name=ingress-nginx -n $NAMESPACE } +trap on_exit EXIT -export -f waitForPod +CLUSTER_WIDE="$DIR/cluster-wide-$NAMESPACE" -timeout 10s bash -c waitForPod +mkdir "$CLUSTER_WIDE" -if kubectl get pods -n ingress-nginx -l app=ingress-nginx -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; -then - echo "Kubernetes deployments started" -else - echo "Kubernetes deployments with issues:" - kubectl get pods -n ingress-nginx +cat << EOF > "$CLUSTER_WIDE/kustomization.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../cluster-wide +nameSuffix: "-$NAMESPACE" +EOF - echo "Reason:" - kubectl describe pods -n ingress-nginx - kubectl logs -n ingress-nginx -l app=ingress-nginx - exit 1 -fi +OVERLAY="$DIR/overlay-$NAMESPACE" + +mkdir "$OVERLAY" + +cat << EOF > "$OVERLAY/kustomization.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: $NAMESPACE +bases: +- ../overlay +- ../cluster-wide-$NAMESPACE +EOF + +kubectl apply --kustomize "$OVERLAY" + +# wait for the deployment and fail if there is an error before starting the execution of any test +kubectl rollout status \ + --request-timeout=3m \ + --namespace $NAMESPACE \ + deployment nginx-ingress-controller diff --git a/test/manifests/configuration-a.json b/test/manifests/configuration-a.json index d2fb585b4e..673b12696a 100644 --- a/test/manifests/configuration-a.json +++ b/test/manifests/configuration-a.json @@ -55,8 +55,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }, { @@ -128,8 +127,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }, { @@ -194,8 +192,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }], @@ -276,7 +273,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" @@ -384,7 +380,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": true, "forceSSLRedirect": false, "appRoot": "" @@ -463,7 +458,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" @@ -576,7 +570,6 @@ }, "redirect": { "target": "/", - "addBaseUrl": false, "sslRedirect": true, "forceSSLRedirect": false, "appRoot": "" @@ -655,7 +648,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" diff --git a/test/manifests/configuration-b.json b/test/manifests/configuration-b.json index 9fb62a406a..21a1cfc188 100644 --- a/test/manifests/configuration-b.json +++ b/test/manifests/configuration-b.json @@ -55,8 +55,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }, { @@ -128,8 +127,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }, { @@ -194,8 +192,7 @@ "sessionAffinityConfig": { "name": "", "cookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }], @@ -276,7 +273,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, 
"sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" @@ -384,7 +380,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": true, "forceSSLRedirect": false, "appRoot": "" @@ -463,7 +458,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" @@ -576,7 +570,6 @@ }, "redirect": { "target": "/", - "addBaseUrl": false, "sslRedirect": true, "forceSSLRedirect": false, "appRoot": "" @@ -675,7 +668,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" diff --git a/test/manifests/configuration-c.json b/test/manifests/configuration-c.json index ee2078015c..3f226c7cad 100644 --- a/test/manifests/configuration-c.json +++ b/test/manifests/configuration-c.json @@ -26,8 +26,7 @@ "SessionAffinity": { "name": "", "CookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }, { @@ -80,8 +79,7 @@ "SessionAffinity": { "name": "", "CookieSessionAffinity": { - "name": "", - "hash": "" + "name": "" } } }], @@ -128,7 +126,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": false, "forceSSLRedirect": false, "appRoot": "" @@ -229,7 +226,6 @@ }, "redirect": { "target": "", - "addBaseUrl": false, "sslRedirect": true, "forceSSLRedirect": false, "appRoot": "" diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml deleted file mode 100644 index decefd6bff..0000000000 --- a/vendor/cloud.google.com/go/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: -- 1.6 -- 1.7 -- 1.8 -- 1.9 -install: -- go get -v cloud.google.com/go/... -script: -- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d -- tar xvf keys.tar -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" - GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json" - GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests" - GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json" - ./run-tests.sh $TRAVIS_COMMIT -env: - matrix: - # The GCLOUD_TESTS_API_KEY environment variable. - secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md deleted file mode 100644 index 95c94a48b4..0000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ /dev/null @@ -1,152 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. - 1. You will need to ensure that your `GOBIN` directory (by default - `$GOPATH/bin`) is in your `PATH` so that git can find the command. - 1. If you would like, you may want to set up aliases for git-codereview, - such that `git codereview change` becomes `git change`. See the - [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. - 1. Should you run into issues with the git-codereview tool, please note - that all error messages will assume that you have set up these - aliases. -1. Get the cloud package by running `go get -d cloud.google.com/go`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: - - git remote set-url origin https://code.googlesource.com/gocloud -1. 
Make sure your auth is configured correctly by visiting - https://code.googlesource.com, clicking "Generate Password", and following - the directions. -1. Make changes and create a change by running `git codereview change `, -provide a commit message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change with `git codereview change` and mail as your receive -feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. - -## Integration Tests - -In addition to the unit tests, you may run the integration test suite. - -To run the integrations tests, creating and configuration of a project in the -Google Developers Console is required. - -After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). -Ensure the project-level **Owner** -[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the -service account. Alternatively, the account can be granted all of the following roles: -- **Editor** -- **Logs Configuration Writer** -- **PubSub Admin** - -Once you create a project, set the following environment variables to be able to -run the against the actual APIs. - -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. -- **GCLOUD_TESTS_API_KEY**: Your API key. - -Firestore requires a different project and key: - -- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID - supporting Firestore -- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file. - -Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create some resources used in integration tests. - -From the project's root directory: - -``` sh -# Set the default project in your env. -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticate the gcloud tool with your account. -$ gcloud auth login - -# Create the indexes used in the datastore integration tests. -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml - -# Create a Google Cloud storage bucket with the same name as your test project, -# and with the Stackdriver Logging service account as owner, for the sink -# integration tests in logging. -$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID -$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Create a PubSub topic for integration tests of storage notifications. -$ gcloud beta pubsub topics create go-storage-notification-test - -# Create a Spanner instance for the spanner integration tests. -$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' -# NOTE: Spanner instances are priced by the node-hour, so you may want to delete -# the instance after testing with 'gcloud beta spanner instances delete'. - - -``` - -Once you've set the environment variables, you can run the integration tests by -running: - -``` sh -$ go test -v cloud.google.com/go/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. 
-- **If you work for a company that wants to allow you to contribute your -work**, then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS index d4b376c7cc..3b3cbed98e 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -22,10 +22,13 @@ David Symonds Filippo Valsorda Glenn Lewis Ingo Oeser +James Hall Johan Euphrosine Jonathan Amsterdam +Kunpei Sakai Luna Duclos Magnus Hiie +Mario Castro Michael McGreevy Omar Jarjur Paweł Knap diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE index a4c5efd822..d645695673 100644 --- a/vendor/cloud.google.com/go/LICENSE +++ b/vendor/cloud.google.com/go/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 Google Inc. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/MIGRATION.md b/vendor/cloud.google.com/go/MIGRATION.md deleted file mode 100644 index 791210de19..0000000000 --- a/vendor/cloud.google.com/go/MIGRATION.md +++ /dev/null @@ -1,54 +0,0 @@ -# Code Changes - -## v0.10.0 - -- pubsub: Replace - - ``` - sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) - ``` - - with - - ``` - sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ - PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, - }) - ``` - -- trace: traceGRPCServerInterceptor will be provided from *trace.Client. -Given an initialized `*trace.Client` named `tc`, instead of - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) - ``` - - write - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) - ``` - -- trace trace.GRPCClientInterceptor will also provided from *trace.Client. -Instead of - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) - ``` - - write - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) - ``` - -- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC -interceptor as a dial option as shown below when initializing Cloud package -clients: - - ``` - c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) - if err != nil { - ... - } - ``` diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md deleted file mode 100644 index 2dbfefc198..0000000000 --- a/vendor/cloud.google.com/go/README.md +++ /dev/null @@ -1,541 +0,0 @@ -# Google Cloud Client Libraries for Go - -[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) - -Go packages for [Google Cloud Platform](https://cloud.google.com) services. - -``` go -import "cloud.google.com/go" -``` - -To install the packages on your system, - -``` -$ go get -u cloud.google.com/go/... -``` - -**NOTE:** Some of these packages are under development, and may occasionally -make backwards-incompatible changes. - -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - - * [News](#news) - * [Supported APIs](#supported-apis) - * [Go Versions Supported](#go-versions-supported) - * [Authorization](#authorization) - * [Cloud Datastore](#cloud-datastore-) - * [Cloud Storage](#cloud-storage-) - * [Cloud Pub/Sub](#cloud-pub-sub-) - * [Cloud BigQuery](#cloud-bigquery-) - * [Stackdriver Logging](#stackdriver-logging-) - * [Cloud Spanner](#cloud-spanner-) - - -## News - -_October 30, 2017_ - -*v0.16.0* - -- Other bigquery changes: - - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). - - UseStandardSQL is deprecated; set UseLegacySQL to true if you need - Legacy SQL. - - Uploader.Put will generate a random insert ID if you do not provide one. - - Support time partitioning for load jobs. - - Support dry-run queries. - - A `Job` remembers its last retrieved status. - - Support retrieving job configuration. - - Support labels for jobs and tables. - - Support dataset access lists. - - Improve support for external data sources, including data from Bigtable and - Google Sheets, and tables with external data. - - Support updating a table's view configuration. - - Fix uploading civil times with nanoseconds. - -- storage: - - Support PubSub notifications. 
- - Support Requester Pays buckets. - -- profiler: Support goroutine and mutex profile types. - - -_October 3, 2017_ - -*v0.15.0* - -- firestore: beta release. See the - [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). - -- errorreporting: The existing package has been redesigned. - -- errors: This package has been removed. Use errorreporting. - - -_September 28, 2017_ - -*v0.14.0* - -- bigquery BREAKING CHANGES: - - Standard SQL is the default for queries and views. - - `Table.Create` takes `TableMetadata` as a second argument, instead of - options. - - `Dataset.Create` takes `DatasetMetadata` as a second argument. - - `DatasetMetadata` field `ID` renamed to `FullID` - - `TableMetadata` field `ID` renamed to `FullID` - -- Other bigquery changes: - - The client will append a random suffix to a provided job ID if you set - `AddJobIDSuffix` to true in a job config. - - Listing jobs is supported. - - Better retry logic. - -- vision, language, speech: clients are now stable - -- monitoring: client is now beta - -- profiler: - - Rename InstanceName to Instance, ZoneName to Zone - - Auto-detect service name and version on AppEngine. - -_September 8, 2017_ - -*v0.13.0* - -- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these - options to continue using Legacy SQL after the client switches its default - to Standard SQL. - -- bigquery: Support for updating dataset labels. - -- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other - than the client's. DatasetsInProject is no longer needed and is deprecated. - -- bigtable: Fail ListInstances when any zones fail. - -- spanner: support decoding of slices of basic types (e.g. []string, []int64, - etc.) - -- logging/logadmin: UpdateSink no longer creates a sink if it is missing - (actually a change to the underlying service, not the client) - -- profiler: Service and ServiceVersion replace Target in Config. - -_August 22, 2017_ - -*v0.12.0* - -- pubsub: Subscription.Receive now uses streaming pull. - -- pubsub: add Client.TopicInProject to access topics in a different project - than the client. - -- errors: renamed errorreporting. The errors package will be removed shortly. - -- datastore: improved retry behavior. - -- bigquery: support updates to dataset metadata, with etags. - -- bigquery: add etag support to Table.Update (BREAKING: etag argument added). - -- bigquery: generate all job IDs on the client. - -- storage: support bucket lifecycle configurations. 
- - -[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) - -## Supported APIs - -Google API | Status | Package ----------------------------------|--------------|----------------------------------------------------------- -[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] -[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] -[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] -[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] -[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] -[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] -[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] -[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] -[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] -[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] -[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] -[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref] -[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] -[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref] -[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] -[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] - - -> **Alpha status**: the API is still being actively developed. As a -> result, it might change in backward-incompatible ways and is not recommended -> for production use. -> -> **Beta status**: the API is largely complete, but still has outstanding -> features and bugs to be addressed. There may be minor backwards-incompatible -> changes where necessary. -> -> **Stable status**: the API is mature and ready for production use. We will -> continue addressing bugs and feature requests. - -Documentation and examples are available at -https://godoc.org/cloud.google.com/go - -Visit or join the -[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) -for updates on these packages. - -## Go Versions Supported - -We support the two most recent major versions of Go. If Google App Engine uses -an older version, we support that as well. You can see which versions are -currently supported by looking at the lines following `go:` in -[`.travis.yml`](.travis.yml). - -## Authorization - -By default, each API will use [Google Application Default Credentials][default-creds] -for authorization credentials used in calling the API endpoints. This will allow your -application to run in many environments without requiring explicit configuration. - -[snip]:# (auth) -```go -client, err := storage.NewClient(ctx) -``` - -To authorize using a -[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), -pass -[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) -to the `NewClient` function of the desired package. 
For example: - -[snip]:# (auth-JSON) -```go -client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) -``` - -You can exert more control over authorization by using the -[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass -[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) -to the `NewClient` function: -[snip]:# (auth-ts) -```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) -``` - -## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) - -- [About Cloud Datastore][cloud-datastore] -- [Activating the API for your project][cloud-datastore-activation] -- [API documentation][cloud-datastore-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) -- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) - -### Example Usage - -First create a `datastore.Client` to use throughout your application: - -[snip]:# (datastore-1) -```go -client, err := datastore.NewClient(ctx, "my-project-id") -if err != nil { - log.Fatal(err) -} -``` - -Then use that client to interact with the API: - -[snip]:# (datastore-2) -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NameKey("Post", "post1", nil), - datastore.NameKey("Post", "post2", nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := client.PutMulti(ctx, keys, posts); err != nil { - log.Fatal(err) -} -``` - -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) - -- [About Cloud Storage][cloud-storage] -- [API documentation][cloud-storage-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) - -### Example Usage - -First create a `storage.Client` to use throughout your application: - -[snip]:# (storage-1) -```go -client, err := storage.NewClient(ctx) -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (storage-2) -```go -// Read the object1 from bucket. -rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) -if err != nil { - log.Fatal(err) -} -defer rc.Close() -body, err := ioutil.ReadAll(rc) -if err != nil { - log.Fatal(err) -} -``` - -## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) - -- [About Cloud Pubsub][cloud-pubsub] -- [API documentation][cloud-pubsub-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) - -### Example Usage - -First create a `pubsub.Client` to use throughout your application: - -[snip]:# (pubsub-1) -```go -client, err := pubsub.NewClient(ctx, "project-id") -if err != nil { - log.Fatal(err) -} -``` - -Then use the client to publish and subscribe: - -[snip]:# (pubsub-2) -```go -// Publish "hello world" on topic1. 
-topic := client.Topic("topic1") -res := topic.Publish(ctx, &pubsub.Message{ - Data: []byte("hello world"), -}) -// The publish happens asynchronously. -// Later, you can get the result from res: -... -msgID, err := res.Get(ctx) -if err != nil { - log.Fatal(err) -} - -// Use a callback to receive messages via subscription1. -sub := client.Subscription("subscription1") -err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { - fmt.Println(m.Data) - m.Ack() // Acknowledge that we've consumed the message. -}) -if err != nil { - log.Println(err) -} -``` - -## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) - -- [About Cloud BigQuery][cloud-bigquery] -- [API documentation][cloud-bigquery-docs] -- [Go client documentation][cloud-bigquery-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) - -### Example Usage - -First create a `bigquery.Client` to use throughout your application: -[snip]:# (bq-1) -```go -c, err := bigquery.NewClient(ctx, "my-project-ID") -if err != nil { - // TODO: Handle error. -} -``` - -Then use that client to interact with the API: -[snip]:# (bq-2) -```go -// Construct a query. -q := c.Query(` - SELECT year, SUM(number) - FROM [bigquery-public-data:usa_names.usa_1910_2013] - WHERE name = "William" - GROUP BY year - ORDER BY year -`) -// Execute the query. -it, err := q.Read(ctx) -if err != nil { - // TODO: Handle error. -} -// Iterate through the results. -for { - var values []bigquery.Value - err := it.Next(&values) - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - fmt.Println(values) -} -``` - - -## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) - -- [About Stackdriver Logging][cloud-logging] -- [API documentation][cloud-logging-docs] -- [Go client documentation][cloud-logging-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) - -### Example Usage - -First create a `logging.Client` to use throughout your application: -[snip]:# (logging-1) -```go -ctx := context.Background() -client, err := logging.NewClient(ctx, "my-project") -if err != nil { - // TODO: Handle error. -} -``` - -Usually, you'll want to add log entries to a buffer to be periodically flushed -(automatically and asynchronously) to the Stackdriver Logging service. -[snip]:# (logging-2) -```go -logger := client.Logger("my-log") -logger.Log(logging.Entry{Payload: "something happened!"}) -``` - -Close your client before your program exits, to flush any buffered log entries. -[snip]:# (logging-3) -```go -err = client.Close() -if err != nil { - // TODO: Handle error. 
-} -``` - -## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) - -- [About Cloud Spanner][cloud-spanner] -- [API documentation][cloud-spanner-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) - -### Example Usage - -First create a `spanner.Client` to use throughout your application: - -[snip]:# (spanner-1) -```go -client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (spanner-2) -```go -// Simple Reads And Writes -_, err = client.Apply(ctx, []*spanner.Mutation{ - spanner.Insert("Users", - []string{"name", "email"}, - []interface{}{"alice", "a@example.com"})}) -if err != nil { - log.Fatal(err) -} -row, err := client.Single().ReadRow(ctx, "Users", - spanner.Key{"alice"}, []string{"email"}) -if err != nil { - log.Fatal(err) -} -``` - - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews. Please don't open pull -requests against this repo, new pull requests will be automatically closed. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore -[cloud-datastore-docs]: https://cloud.google.com/datastore/docs -[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate - -[cloud-firestore]: https://cloud.google.com/firestore/ -[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore -[cloud-firestore-docs]: https://cloud.google.com/firestore/docs -[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate - -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub -[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs - -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage -[cloud-storage-docs]: https://cloud.google.com/storage/docs -[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets - -[cloud-bigtable]: https://cloud.google.com/bigtable/ -[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable - -[cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs -[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery - -[cloud-logging]: https://cloud.google.com/logging/ -[cloud-logging-docs]: https://cloud.google.com/logging/docs -[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging - -[cloud-monitoring]: https://cloud.google.com/monitoring/ -[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3 - -[cloud-vision]: https://cloud.google.com/vision -[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1 - -[cloud-language]: https://cloud.google.com/natural-language -[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 - -[cloud-speech]: https://cloud.google.com/speech 
-[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 - -[cloud-spanner]: https://cloud.google.com/spanner/ -[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner -[cloud-spanner-docs]: https://cloud.google.com/spanner/docs - -[cloud-translation]: https://cloud.google.com/translation -[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation - -[cloud-trace]: https://cloud.google.com/trace/ -[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace - -[cloud-video]: https://cloud.google.com/video-intelligence/ -[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1 - -[cloud-errors]: https://cloud.google.com/error-reporting/ -[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting - -[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml b/vendor/cloud.google.com/go/appveyor.yml deleted file mode 100644 index e66cd00af4..0000000000 --- a/vendor/cloud.google.com/go/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -# This file configures AppVeyor (http://www.appveyor.com), -# a Windows-based CI service similar to Travis. - -# Identifier for this run -version: "{build}" - -# Clone the repo into this path, which conforms to the standard -# Go workspace structure. -clone_folder: c:\gopath\src\cloud.google.com\go - -environment: - GOPATH: c:\gopath - GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 - GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json - KEYFILE_CONTENTS: - secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje - -install: - # Info for debugging. - - echo %PATH% - - go version - - go env - - go get -v -d -t ./... - - -# Provide a build script, or AppVeyor will call msbuild. -build_script: - - go install -v ./... 
- - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% - -test_script: - - go test -v ./... diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go deleted file mode 100644 index fe75467f94..0000000000 --- a/vendor/cloud.google.com/go/authexample_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud_test - -import ( - "cloud.google.com/go/datastore" - "golang.org/x/net/context" - "google.golang.org/api/option" -) - -func Example_applicationDefaultCredentials() { - // Google Application Default Credentials is the recommended way to authorize - // and authenticate clients. - // - // See the following link on how to create and obtain Application Default Credentials: - // https://developers.google.com/identity/protocols/application-default-credentials. - client, err := datastore.NewClient(context.Background(), "project-id") - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. -} - -func Example_serviceAccountFile() { - // Use a JSON key file associated with a Google service account to - // authenticate and authorize. Service Account keys can be created and - // downloaded from https://console.developers.google.com/permissions/serviceaccounts. - // - // Note: This example uses the datastore client, but the same steps apply to - // the other client libraries underneath this package. - client, err := datastore.NewClient(context.Background(), - "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. -} diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go deleted file mode 100644 index 6ba428dc63..0000000000 --- a/vendor/cloud.google.com/go/cloud.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cloud is the root of the packages used to access Google Cloud -// Services. See https://godoc.org/cloud.google.com/go for a full list -// of sub-packages. -// -// This package documents how to authorize and authenticate the sub packages. 
-package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index e708c031b9..125b7033c9 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -1,4 +1,4 @@ -// Copyright 2014 Google Inc. All Rights Reserved. +// Copyright 2014 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package metadata // import "cloud.google.com/go/compute/metadata" import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -31,9 +32,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( @@ -64,7 +62,7 @@ var ( ) var ( - metaClient = &http.Client{ + defaultClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, @@ -72,15 +70,15 @@ var ( }).Dial, ResponseHeaderTimeout: 2 * time.Second, }, - } - subscribeClient = &http.Client{ + }} + subscribeClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, }, - } + }} ) // NotDefinedError is returned when requested metadata is not defined. @@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. 
- host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { +func (c *cachedValue) get(cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = getTrimmed(c.k) + v, err = cl.getTrimmed(c.k) } else { - v, err = Get(c.k) + v, err = cl.Get(c.k) } if err == nil { c.v = v @@ -197,11 +137,11 @@ func testOnGCE() bool { resc := make(chan bool, 2) // Try two strategies in parallel. - // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + // See https://github.com/googleapis/google-cloud-go/issues/194 go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := ctxhttp.Do(ctx, metaClient, req) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) if err != nil { resc <- false return @@ -266,78 +206,183 @@ func systemInfoSuggestsGCE() bool { return name == "Google" || name == "Google Compute Engine" } -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 + return subscribeClient.Subscribe(suffix, fn) +} - // First check to see if the metadata value exists at all. - val, lastETag, err := getETag(subscribeClient, suffix) - if err != nil { - return err - } +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - if err := fn(val, true); err != nil { - return err +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. 
This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } } + return false +} - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP } - for { - val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. 
- } - ok = false - } - lastETag = etag + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} - if err := fn(val, ok); err != nil || !ok { - return err - } +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } +func (c *Client) ProjectID() (string, error) { return projID.get(c) } // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") } // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". -func Hostname() (string, error) { - return getTrimmed("instance/hostname") +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") } // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { +func (c *Client) InstanceTags() ([]string, error) { var s []string - j, err := Get("instance/tags") + j, err := c.Get("instance/tags") if err != nil { return nil, err } @@ -347,14 +392,9 @@ func InstanceTags() ([]string, error) { return s, nil } -// InstanceID returns the current VM's numeric instance ID. 
-func InstanceID() (string, error) { - return instID.get() -} - // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() if err != nil { return "", err } @@ -362,8 +402,8 @@ func InstanceName() (string, error) { } // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -374,24 +414,12 @@ func Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -401,8 +429,8 @@ func lines(suffix string) ([]string, error) { // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) } // ProjectAttributeValue returns the value of the provided @@ -413,25 +441,73 @@ func InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. -func Scopes(serviceAccount string) ([]string, error) { +func (c *Client) Scopes(serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") } -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. 
If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err } } - return false +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) } diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go deleted file mode 100644 index 9ac5926918..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "os" - "sync" - "testing" -) - -func TestOnGCE_Stress(t *testing.T) { - if testing.Short() { - t.Skip("skipping in -short mode") - } - var last bool - for i := 0; i < 100; i++ { - onGCEOnce = sync.Once{} - - now := OnGCE() - if i > 0 && now != last { - t.Errorf("%d. changed from %v to %v", i, last, now) - } - last = now - } - t.Logf("OnGCE() = %v", last) -} - -func TestOnGCE_Force(t *testing.T) { - onGCEOnce = sync.Once{} - old := os.Getenv(metadataHostEnv) - defer os.Setenv(metadataHostEnv, old) - os.Setenv(metadataHostEnv, "127.0.0.1") - if !OnGCE() { - t.Error("OnGCE() = false; want true") - } -} diff --git a/vendor/cloud.google.com/go/keys.tar.enc b/vendor/cloud.google.com/go/keys.tar.enc deleted file mode 100644 index c54408c93a..0000000000 Binary files a/vendor/cloud.google.com/go/keys.tar.enc and /dev/null differ diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go deleted file mode 100644 index 4b87878a02..0000000000 --- a/vendor/cloud.google.com/go/license_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -var sentinels = []string{ - "Copyright", - "Google Inc", - `Licensed under the Apache License, Version 2.0 (the "License");`, -} - -func TestLicense(t *testing.T) { - err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { - return nil - } - if strings.HasSuffix(path, ".pb.go") { - // .pb.go files are generated from the proto files. - // .proto files must have license headers. - return nil - } - if path == "bigtable/cmd/cbt/cbtdoc.go" { - // Automatically generated. - return nil - } - - src, err := ioutil.ReadFile(path) - if err != nil { - return nil - } - src = src[:140] // Ensure all of the sentinel values are at the top of the file. - - // Find license - for _, sentinel := range sentinels { - if !bytes.Contains(src, []byte(sentinel)) { - t.Errorf("%v: license header not present. want %q", path, sentinel) - return nil - } - } - - return nil - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md deleted file mode 100644 index 896980d96c..0000000000 --- a/vendor/cloud.google.com/go/old-news.md +++ /dev/null @@ -1,486 +0,0 @@ -_July 31, 2017_ - -*v0.11.0* - -- Clients for spanner, pubsub and video are now in beta. - -- New client for DLP. - -- spanner: performance and testing improvements. - -- storage: requester-pays buckets are supported. - -- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. - -- pubsub: bug fixes and other minor improvements - -_June 17, 2017_ - - -*v0.10.0* - -- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. - -- pubsub: Subscription.Receive now runs concurrently for higher throughput. - -- vision: cloud.google.com/go/vision is deprecated. Use -cloud.google.com/go/vision/apiv1 instead. - -- translation: now stable. - -- trace: several changes to the surface. See the link below. - -[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md) - - -_March 17, 2017_ - -Breaking Pubsub changes. -* Publish is now asynchronous -([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). -* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). -* Message.Done replaced with Message.Ack and Message.Nack. - -_February 14, 2017_ - -Release of a client library for Spanner. See -the -[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). - -Note that although the Spanner service is beta, the Go client library is alpha. 
- -_December 12, 2016_ - -Beta release of BigQuery, DataStore, Logging and Storage. See the -[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). - -Also, BigQuery now supports structs. Read a row directly into a struct with -`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. -You can also use field tags. See the [package documentation][cloud-bigquery-ref] -for details. - -_December 5, 2016_ - -More changes to BigQuery: - -* The `ValueList` type was removed. It is no longer necessary. Instead of - ```go - var v ValueList - ... it.Next(&v) .. - ``` - use - - ```go - var v []Value - ... it.Next(&v) ... - ``` - -* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or - `ValueList` would append to the slice. Now each call resets the size to zero first. - -* Schema inference will infer the SQL type BYTES for a struct field of - type []byte. Previously it inferred STRING. - -* The types `uint`, `uint64` and `uintptr` are no longer supported in schema - inference. BigQuery's integer type is INT64, and those types may hold values - that are not correctly represented in a 64-bit signed integer. - -* The SQL types DATE, TIME and DATETIME are now supported. They correspond to - the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` - package. - -_November 17, 2016_ - -Change to BigQuery: values from INTEGER columns will now be returned as int64, -not int. This will avoid errors arising from large values on 32-bit systems. - -_November 8, 2016_ - -New datastore feature: datastore now encodes your nested Go structs as Entity values, -instead of a flattened list of the embedded struct's fields. -This means that you may now have twice-nested slices, eg. -```go -type State struct { - Cities []struct{ - Populations []int - } -} -``` - -See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for -more details. - -_November 8, 2016_ - -Breaking changes to datastore: contexts no longer hold namespaces; instead you -must set a key's namespace explicitly. Also, key functions have been changed -and renamed. - -* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: - ```go - q := datastore.NewQuery("Kind").Namespace("ns") - ``` - -* All the fields of Key are exported. That means you can construct any Key with a struct literal: - ```go - k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} - ``` - -* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. - -* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace - ```go - NewIncompleteKey(ctx, kind, parent) - ``` - with - ```go - IncompleteKey(kind, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - -* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace - ```go - NewKey(ctx, kind, name, 0, parent) - NewKey(ctx, kind, "", id, parent) - ``` - with - ```go - NameKey(kind, name, parent) - IDKey(kind, id, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - -* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. - -* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. 
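For illustration, the key-related bullets above combine into this before/after
sketch (the kind, ID and namespace values here are made up):

```go
package main

import "cloud.google.com/go/datastore"

func main() {
	// Before: datastore.NewKey(ctx, "Post", "post1", 0, nil)
	named := datastore.NameKey("Post", "post1", nil) // key identified by a string name
	named.Namespace = "ns"                           // the namespace now lives on the key, not the context

	// Before: datastore.NewKey(ctx, "Post", "", 37, nil)
	byID := datastore.IDKey("Post", 37, nil) // key identified by a numeric ID

	// Before: datastore.NewIncompleteKey(ctx, "Post", nil)
	incomplete := datastore.IncompleteKey("Post", nil)

	_, _ = byID, incomplete
}
```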
- -See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for -more details. - -_October 27, 2016_ - -Breaking change to bigquery: `NewGCSReference` is now a function, -not a method on `Client`. - -New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling -loading data into a table from a file or any `io.Reader`. - -_October 21, 2016_ - -Breaking change to pubsub: removed `pubsub.Done`. - -Use `iterator.Done` instead, where `iterator` is the package -`google.golang.org/api/iterator`. - -_October 19, 2016_ - -Breaking changes to cloud.google.com/go/bigquery: - -* Client.Table and Client.OpenTable have been removed. - Replace - ```go - client.OpenTable("project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table") - ``` - -* Client.CreateTable has been removed. - Replace - ```go - client.CreateTable(ctx, "project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table").Create(ctx) - ``` - -* Dataset.ListTables have been replaced with Dataset.Tables. - Replace - ```go - tables, err := ds.ListTables(ctx) - ``` - with - ```go - it := ds.Tables(ctx) - for { - table, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use table. - } - ``` - -* Client.Read has been replaced with Job.Read, Table.Read and Query.Read. - Replace - ```go - it, err := client.Read(ctx, job) - ``` - with - ```go - it, err := job.Read(ctx) - ``` - and similarly for reading from tables or queries. - -* The iterator returned from the Read methods is now named RowIterator. Its - behavior is closer to the other iterators in these libraries. It no longer - supports the Schema method; see the next item. - Replace - ```go - for it.Next(ctx) { - var vals ValueList - if err := it.Get(&vals); err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - if err := it.Err(); err != nil { - // TODO: Handle error. - } - ``` - with - ``` - for { - var vals ValueList - err := it.Next(&vals) - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - ``` - Instead of the `RecordsPerRequest(n)` option, write - ```go - it.PageInfo().MaxSize = n - ``` - Instead of the `StartIndex(i)` option, write - ```go - it.StartIndex = i - ``` - -* ValueLoader.Load now takes a Schema in addition to a slice of Values. - Replace - ```go - func (vl *myValueLoader) Load(v []bigquery.Value) - ``` - with - ```go - func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) - ``` - - -* Table.Patch is replace by Table.Update. - Replace - ```go - p := table.Patch() - p.Description("new description") - metadata, err := p.Apply(ctx) - ``` - with - ```go - metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ - Description: "new description", - }) - ``` - -* Client.Copy is replaced by separate methods for each of its four functions. - All options have been replaced by struct fields. - - * To load data from Google Cloud Storage into a table, use Table.LoaderFrom. - - Replace - ```go - client.Copy(ctx, table, gcsRef) - ``` - with - ```go - table.LoaderFrom(gcsRef).Run(ctx) - ``` - Instead of passing options to Copy, set fields on the Loader: - ```go - loader := table.LoaderFrom(gcsRef) - loader.WriteDisposition = bigquery.WriteTruncate - ``` - - * To extract data from a table into Google Cloud Storage, use - Table.ExtractorTo. 
Set fields on the returned Extractor instead of
- passing options.
-
- Replace
- ```go
- client.Copy(ctx, gcsRef, table)
- ```
- with
- ```go
- table.ExtractorTo(gcsRef).Run(ctx)
- ```
-
- * To copy data into a table from one or more other tables, use
- Table.CopierFrom. Set fields on the returned Copier instead of passing options.
-
- Replace
- ```go
- client.Copy(ctx, dstTable, srcTable)
- ```
- with
- ```go
- dstTable.CopierFrom(srcTable).Run(ctx)
- ```
-
- * To start a query job, create a Query and call its Run method. Set fields
- on the query instead of passing options.
-
- Replace
- ```go
- client.Copy(ctx, table, query)
- ```
- with
- ```go
- query.Run(ctx)
- ```
-
-* Table.NewUploader has been renamed to Table.Uploader. Instead of options,
- configure an Uploader by setting its fields.
- Replace
- ```go
- u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
- ```
- with
- ```go
- u := table.Uploader()
- u.IgnoreUnknownValues = true
- ```
-
-_October 10, 2016_
-
-Breaking changes to cloud.google.com/go/storage:
-
-* AdminClient replaced by methods on Client.
- Replace
- ```go
- adminClient.CreateBucket(ctx, bucketName, attrs)
- ```
- with
- ```go
- client.Bucket(bucketName).Create(ctx, projectID, attrs)
- ```
-
-* BucketHandle.List replaced by BucketHandle.Objects.
- Replace
- ```go
- for query != nil {
- objs, err := bucket.List(d.ctx, query)
- if err != nil { ... }
- query = objs.Next
- for _, obj := range objs.Results {
- fmt.Println(obj)
- }
- }
- ```
- with
- ```go
- iter := bucket.Objects(d.ctx, query)
- for {
- obj, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil { ... }
- fmt.Println(obj)
- }
- ```
- (The `iterator` package is at `google.golang.org/api/iterator`.)
-
- Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
-
- Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
-
-
-* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
- Replace
- ```go
- attrs, err := src.CopyTo(ctx, dst, nil)
- ```
- with
- ```go
- attrs, err := dst.CopierFrom(src).Run(ctx)
- ```
-
- Replace
- ```go
- attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
- ```
- with
- ```go
- c := dst.CopierFrom(src)
- c.ContentType = "text/html"
- attrs, err := c.Run(ctx)
- ```
-
-* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
- Replace
- ```go
- attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
- ```
- with
- ```go
- attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
- ```
-
-* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
- Replace
- ```go
- attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
- ```
- with
- ```go
- attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
- ```
-
-* ObjectHandle.WithConditions replaced by ObjectHandle.If.
- Replace
- ```go
- obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
- ```
- with
- ```go
- obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
- ```
-
- Replace
- ```go
- obj.WithConditions(storage.IfGenerationMatch(0))
- ```
- with
- ```go
- obj.If(storage.Conditions{DoesNotExist: true})
- ```
-
-* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
-
-_October 6, 2016_
-
-Package preview/logging deleted. Use logging instead.
-
-_September 27, 2016_
-
-Logging client replaced with preview version (see below).
- -_September 8, 2016_ - -* New clients for some of Google's Machine Learning APIs: Vision, Speech, and -Natural Language. - -* Preview version of a new [Stackdriver Logging][cloud-logging] client in -[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). -This client uses gRPC as its transport layer, and supports log reading, sinks -and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. - diff --git a/vendor/cloud.google.com/go/run-tests.sh b/vendor/cloud.google.com/go/run-tests.sh deleted file mode 100755 index f47ff50a5e..0000000000 --- a/vendor/cloud.google.com/go/run-tests.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -# Selectively run tests for this repo, based on what has changed -# in a commit. Runs short tests for the whole repo, and full tests -# for changed directories. - -set -e - -prefix=cloud.google.com/go - -dryrun=false -if [[ $1 == "-n" ]]; then - dryrun=true - shift -fi - -if [[ $1 == "" ]]; then - echo >&2 "usage: $0 [-n] COMMIT" - exit 1 -fi - -# Files or directories that cause all tests to run if modified. -declare -A run_all -run_all=([.travis.yml]=1 [run-tests.sh]=1) - -function run { - if $dryrun; then - echo $* - else - (set -x; $*) - fi -} - - -# Find all the packages that have changed in this commit. -declare -A changed_packages - -for f in $(git diff-tree --no-commit-id --name-only -r $1); do - if [[ ${run_all[$f]} == 1 ]]; then - # This change requires a full test. Do it and exit. - run go test -race -v $prefix/... - exit - fi - # Map, e.g., "spanner/client.go" to "$prefix/spanner". - d=$(dirname $f) - if [[ $d == "." ]]; then - pkg=$prefix - else - pkg=$prefix/$d - fi - changed_packages[$pkg]=1 -done - -echo "changed packages: ${!changed_packages[*]}" - - -# Reports whether its argument, a package name, depends (recursively) -# on a changed package. -function depends_on_changed_package { - # According to go list, a package does not depend on itself, so - # we test that separately. - if [[ ${changed_packages[$1]} == 1 ]]; then - return 0 - fi - for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do - if [[ ${changed_packages[$dep]} == 1 ]]; then - return 0 - fi - done - return 1 -} - -# Collect the packages into two separate lists. (It is faster go test a list of -# packages than to individually go test each one.) - -shorts= -fulls= -for pkg in $(go list $prefix/...); do # for each package in the repo - if depends_on_changed_package $pkg; then # if it depends on a changed package - fulls="$fulls $pkg" # run the full test - else # otherwise - shorts="$shorts $pkg" # run the short test - fi -done -run go test -race -v -short $shorts -if [[ $fulls != "" ]]; then - run go test -race -v $fulls -fi diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml new file mode 100644 index 0000000000..ee417bbe6b --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.11.x + +go_import_path: contrib.go.opencensus.io/exporter/ocagent + +before_script: + - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any + - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any + +script: + - go build ./... 
# Ensure dependency updates don't break build
+ - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
+ - go vet ./...
+ - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled
+ - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
+ - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
new file mode 100644
index 0000000000..0786fdf434
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
@@ -0,0 +1,24 @@
+# How to contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
similarity index 100%
rename from images/404-server/vendor/github.com/prometheus/client_golang/LICENSE
rename to vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
new file mode 100644
index 0000000000..3b9e908f59
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
@@ -0,0 +1,61 @@
+# OpenCensus Agent Go Exporter
+
+[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
+
+
+This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
+OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from the
+OpenCensus library, export them to other backends, and possibly push configurations back to
+the library. See more details in the [OC-Agent Readme][OCAgentReadme].
+
+Note: This is an experimental repository and is likely to get backwards-incompatible changes.
+Ultimately we may want to move the OC-Agent Go Exporter to the [OpenCensus Go core library][OpenCensusGo].
+
+## Installation
+
+```bash
+$ go get -u contrib.go.opencensus.io/exporter/ocagent
+```
+
+## Usage
+
+```go
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"contrib.go.opencensus.io/exporter/ocagent"
+	"go.opencensus.io/trace"
+)
+
+func Example() {
+	exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
+	if err != nil {
+		log.Fatalf("Failed to create the agent exporter: %v", err)
+	}
+	defer exp.Stop()
+
+	// Now register it as a trace exporter.
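+	// RegisterExporter adds exp to OpenCensus's process-global set of trace
+	// exporters, so spans sampled anywhere in this program are delivered to the agent.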
+	trace.RegisterExporter(exp)
+
+	// Then use the OpenCensus tracing library, like we normally would.
+	ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
+	defer span.End()
+
+	for i := 0; i < 10; i++ {
+		_, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
+		<-time.After(6 * time.Millisecond)
+		iSpan.End()
+	}
+}
+```
+
+[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
+[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
+[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
+[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
+[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
+[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
+
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
new file mode 100644
index 0000000000..297e44b6e7
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
@@ -0,0 +1,38 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+	"math/rand"
+	"time"
+)
+
+var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// retries function fn up to n times, returning early once fn returns nil.
+// It applies exponential backoff in units of (1< 0 {
+		ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
+	}
+	traceExporter, err := traceSvcClient.Export(ctx)
+	if err != nil {
+		return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
+	}
+
+	firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
+		Node:     node,
+		Resource: ae.resource,
+	}
+	if err := traceExporter.Send(firstTraceMessage); err != nil {
+		return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+	}
+
+	ae.mu.Lock()
+	ae.traceExporter = traceExporter
+	ae.mu.Unlock()
+
+	// Initiate the config service by sending over node identifier info.
+	configStream, err := traceSvcClient.Config(context.Background())
+	if err != nil {
+		return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
+	}
+	firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
+	if err := configStream.Send(firstCfgMessage); err != nil {
+		return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+	}
+
+	// In the background, handle trace configurations that are beamed down
+	// by the agent, but also reply to it with the applied configuration.
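+	// The goroutine exits when handleConfigStreaming returns, which happens
+	// once a Recv or Send on the config stream fails.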
+ go ae.handleConfigStreaming(configStream) + + return nil +} + +func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { + metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) + metricsExporter, err := metricsSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) + } + // Initiate the metrics service by sending over the first message just containing the Node and Resource. + firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := metricsExporter.Send(firstMetricsMessage); err != nil { + return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) + } + + ae.mu.Lock() + ae.metricsExporter = metricsExporter + ae.mu.Unlock() + + // With that we are good to go and can start sending metrics + return nil +} + +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + var dialOpts []grpc.DialOption + if ae.clientTransportCredentials != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) + } else if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + if ae.compressor != "" { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) + } + dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + + ctx := context.Background() + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + return grpc.DialContext(ctx, addr, dialOpts...) +} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + // Note: We haven't yet implemented configuration sending so we + // should NOT be changing connection states within this function for now. + for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. + return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.RLock() + cc := ae.grpcClientConn + started := ae.started + stopped := ae.stopped + ae.mu.RUnlock() + + if !started { + return errNotStarted + } + if stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. 
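+ // (Closing the ClientConn below also tears down the trace, metrics and config + // streams that were created from it.)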
+ var err error + if cc != nil { + err = cc.Close() + } + + // At this point we can change the state variables: started and stopped + ae.mu.Lock() + ae.started = false + ae.stopped = true + ae.mu.Unlock() + close(ae.stopCh) + + // Ensure that the backgroundConnector returns + <-ae.backgroundConnectionDoneCh + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, 1) +} + +func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { + if batch == nil || len(batch.Spans) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if !ae.connected() { + return errNoConnection + } + + ae.senderMu.Lock() + err := ae.traceExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected() + return err + } + return nil + } +} + +func (ae *Exporter) ExportView(vd *view.Data) { + if vd == nil { + return + } + _ = ae.viewDataBundler.Add(vd, 1) +} + +func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { + if len(sdl) == 0 { + return nil + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) + } + } + return protoSpans +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoSpans := ocSpanDataToPbSpans(sdl) + if len(protoSpans) == 0 { + return + } + ae.senderMu.Lock() + err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + }) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected() + } + } +} + +func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { + if len(vdl) == 0 { + return nil + } + metrics := make([]*metricspb.Metric, 0, len(vdl)) + for _, vd := range vdl { + if vd != nil { + vmetric, err := viewDataToMetric(vd) + // TODO: (@odeke-em) somehow report this error, if it is non-nil. + if err == nil && vmetric != nil { + metrics = append(metrics, vmetric) + } + } + } + return metrics +} + +func (ae *Exporter) uploadViewData(vdl []*view.Data) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoMetrics := ocViewDataToPbMetrics(vdl) + if len(protoMetrics) == 0 { + return + } + err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ + Metrics: protoMetrics, + // TODO:(@odeke-em) + // a) Figure out how to derive a Node from the environment + // b) Figure out how to derive a Resource from the environment + // or better letting users of the exporter configure it. 
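+ // (For now, Node and Resource are sent only once, in the first message of each + // stream; see createMetricsServiceConnection above.)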
+ }) + if err != nil { + ae.setStateDisconnected() + } + } +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() + ae.viewDataBundler.Flush() +} + +func resourceProtoFromEnv() *resourcepb.Resource { + rs, _ := resource.FromEnv(context.Background()) + if rs == nil { + return nil + } + + rprs := &resourcepb.Resource{ + Type: rs.Type, + } + if rs.Labels != nil { + rprs.Labels = make(map[string]string) + for k, v := range rs.Labels { + rprs.Labels[k] = v + } + } + return rprs +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 0000000000..3e05ae8b30 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,128 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "google.golang.org/grpc/credentials" +) + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. +func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. If unset, it will instead try to +// connect to DefaultAgentHost:DefaultAgentPort. +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. +func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} + +type reconnectionPeriod time.Duration + +func (rp reconnectionPeriod) withExporter(e *Exporter) { + e.reconnectionPeriod = time.Duration(rp) +} + +// WithReconnectionPeriod allows one to set the period to wait between +// attempts to reconnect to the agent after the connection is lost. +func WithReconnectionPeriod(rp time.Duration) ExporterOption { + return reconnectionPeriod(rp) +} + +type compressorSetter string + +func (c compressorSetter) withExporter(e *Exporter) { + e.compressor = string(c) +} + +// UseCompressor will set the compressor for the gRPC client to use when sending requests.
+// It is the responsibility of the caller to ensure that the compressor set has been registered +// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some +// compressors auto-register on import, such as gzip, which can be registered by calling +// `import _ "google.golang.org/grpc/encoding/gzip"`. +func UseCompressor(compressorName string) ExporterOption { + return compressorSetter(compressorName) +} + +type headerSetter map[string]string + +func (h headerSetter) withExporter(e *Exporter) { + e.headers = map[string]string(h) +} + +// WithHeaders will send the provided headers when the gRPC stream connection +// is instantiated. +func WithHeaders(headers map[string]string) ExporterOption { + return headerSetter(headers) +} + +type clientCredentials struct { + credentials.TransportCredentials +} + +var _ ExporterOption = (*clientCredentials)(nil) + +// WithTLSCredentials allows the connection to use TLS credentials +// when talking to the server. It takes in grpc.TransportCredentials instead +// of, say, a certificate file or a tls.Certificate, because retrieving +// these credentials can be done in many ways, e.g. from a plain file, an in-code tls.Config, +// or by certificate rotation, so it is up to the caller to decide what to use. +func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { + return &clientCredentials{TransportCredentials: creds} +} + +func (cc *clientCredentials) withExporter(e *Exporter) { + e.clientTransportCredentials = cc.TransportCredentials +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 0000000000..983ebe7b70 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,248 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package ocagent + +import ( + "math" + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 +) + +func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. + ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +// This code is mostly copied from +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 +func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { + if len(as) == 0 && len(es) == 0 { + return nil + } + + timeEvents := &tracepb.Span_TimeEvents{} + var annotations, droppedAnnotationsCount int + var messageEvents, droppedMessageEventsCount int + + // Transform annotations + for i, a := range as { + if annotations >= 
maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotations++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(a.Time), + Value: transformAnnotationToTimeEvent(&a), + }, + ) + } + + // Transform message events + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(e.Time), + Value: transformMessageEventToTimeEvent(&e), + }, + ) + } + + // Process dropped counter + timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + + return timeEvents +} + +func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { + return &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: &tracepb.TruncatableString{Value: a.Message}, + Attributes: ocAttributesToProtoAttributes(a.Attributes), + }, + } +} + +func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { + return &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: uint64(e.MessageID), + UncompressedSize: uint64(e.UncompressedByteSize), + CompressedSize: uint64(e.CompressedByteSize), + }, + } +} + +// clip32 clips an int to the range of an int32. +func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} + +func timeToTimestamp(t time.Time) *timestamp.Timestamp { + nanoTime := t.UnixNano() + return &timestamp.Timestamp{ + Seconds: nanoTime / 1e9, + Nanos: int32(nanoTime % 1e9), + } +} + +func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindClient: + return tracepb.Span_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SERVER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} + +func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { + if ts == nil { + return nil + } + return &tracepb.Span_Tracestate{ + Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), + } +} + +func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { + protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) + for _, entry := range entries { + protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ + Key: entry.Key, + Value: entry.Value, + }) + } + return protoEntries +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go new file mode 100644 index 0000000000..43f18dec19 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go @@ -0,0 +1,274 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/golang/protobuf/ptypes/timestamp" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" +) + +var ( + errNilMeasure = errors.New("expecting a non-nil stats.Measure") + errNilView = errors.New("expecting a non-nil view.View") + errNilViewData = errors.New("expecting a non-nil view.Data") +) + +func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { + if vd == nil { + return nil, errNilViewData + } + + descriptor, err := viewToMetricDescriptor(vd.View) + if err != nil { + return nil, err + } + + timeseries, err := viewDataToTimeseries(vd) + if err != nil { + return nil, err + } + + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: timeseries, + } + return metric, nil +} + +func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { + if v == nil { + return nil, errNilView + } + if v.Measure == nil { + return nil, errNilMeasure + } + + desc := &metricspb.MetricDescriptor{ + Name: stringOrCall(v.Name, v.Measure.Name), + Description: stringOrCall(v.Description, v.Measure.Description), + Unit: v.Measure.Unit(), + Type: aggregationToMetricDescriptorType(v), + LabelKeys: tagKeysToLabelKeys(v.TagKeys), + } + return desc, nil +} + +func stringOrCall(first string, call func() string) string { + if first != "" { + return first + } + return call() +} + +type measureType uint + +const ( + measureUnknown measureType = iota + measureInt64 + measureFloat64 +) + +func measureTypeFromMeasure(m stats.Measure) measureType { + switch m.(type) { + default: + return measureUnknown + case *stats.Float64Measure: + return measureFloat64 + case *stats.Int64Measure: + return measureInt64 + } +} + +func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { + if v == nil || v.Aggregation == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + if v.Measure == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + + switch v.Aggregation.Type { + case view.AggTypeCount: + // Cumulative on int64 + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + + case view.AggTypeDistribution: + // Cumulative types + return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION + + case view.AggTypeLastValue: + // Gauge types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_GAUGE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_GAUGE_INT64 + } + + case view.AggTypeSum: + // Cumulative types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + } + } + + // For all other cases, return unspecified. 
+ return metricspb.MetricDescriptor_UNSPECIFIED +} + +func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { + labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) + for _, tagKey := range tagKeys { + labelKeys = append(labelKeys, &metricspb.LabelKey{ + Key: tagKey.Name(), + }) + } + return labelKeys +} + +func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { + if vd == nil || len(vd.Rows) == 0 { + return nil, nil + } + + // Given that view.Data only contains Start and End, + // the timestamps for all the row data will be exactly the same + // per aggregation; however, the values will differ. + // Each row has its own tags. + startTimestamp := timeToProtoTimestamp(vd.Start) + endTimestamp := timeToProtoTimestamp(vd.End) + + mType := measureTypeFromMeasure(vd.View.Measure) + timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) + // It is imperative that the ordering of "LabelValues" matches those + // of the Label keys in the metric descriptor. + for _, row := range vd.Rows { + labelValues := labelValuesFromTags(row.Tags) + point := rowToPoint(vd.View, row, endTimestamp, mType) + timeseries = append(timeseries, &metricspb.TimeSeries{ + StartTimestamp: startTimestamp, + LabelValues: labelValues, + Points: []*metricspb.Point{point}, + }) + } + + if len(timeseries) == 0 { + return nil, nil + } + + return timeseries, nil +} + +func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { + unixNano := t.UnixNano() + return &timestamp.Timestamp{ + Seconds: int64(unixNano / 1e9), + Nanos: int32(unixNano % 1e9), + } +} + +func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { + pt := &metricspb.Point{ + Timestamp: endTimestamp, + } + + switch data := row.Data.(type) { + case *view.CountData: + pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} + + case *view.DistributionData: + pt.Value = &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + Count: data.Count, + Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count + // TODO: Add Exemplar + Buckets: bucketsToProtoBuckets(data.CountPerBucket), + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: v.Aggregation.Buckets, + }, + }, + }, + SumOfSquaredDeviation: data.SumOfSquaredDev, + }} + + case *view.LastValueData: + setPointValue(pt, data.Value, mType) + + case *view.SumData: + setPointValue(pt, data.Value, mType) + } + + return pt +} + +// Not returning anything from this function because metricspb.Point.is_Value is an unexported +// interface; hence we just have to set its value by pointer.
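+// Callers derive mType from the view's Measure (see viewDataToTimeseries) so that SumData and +// LastValueData rows keep their original numeric type on the wire.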
+func setPointValue(pt *metricspb.Point, value float64, mType measureType) { + if mType == measureInt64 { + pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} + } else { + pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} + } +} + +func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { + distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) + for i := 0; i < len(countPerBucket); i++ { + count := countPerBucket[i] + + distBuckets[i] = &metricspb.DistributionValue_Bucket{ + Count: count, + } + } + + return distBuckets +} + +func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { + if len(tags) == 0 { + return nil + } + + labelValues := make([]*metricspb.LabelValue, 0, len(tags)) + for _, tag_ := range tags { + labelValues = append(labelValues, &metricspb.LabelValue{ + Value: tag_.Value, + + // It is imperative that we set the "HasValue" attribute, + // in order to distinguish missing a label from the empty string. + // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue + // + // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, + // so the best case that we can use to distinguish missing labels/tags from the + // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have + // a value. + HasValue: tag_.Key.Name() != "", + }) + } + return labelValues +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 0000000000..68be4c75bd --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +const Version = "0.0.1" diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore deleted file mode 100644 index ab262cbe56..0000000000 --- a/vendor/github.com/Azure/go-autorest/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -# The standard Go .gitignore file follows. 
(Sourced from: github.com/github/gitignore/master/Go.gitignore) -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store -.idea/ - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# go-autorest specific -vendor/ -autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/.travis.yml b/vendor/github.com/Azure/go-autorest/.travis.yml deleted file mode 100644 index 14315d7543..0000000000 --- a/vendor/github.com/Azure/go-autorest/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -sudo: false - -language: go - -go: - - 1.9 - - 1.8 - - 1.7 - -install: - - go get -u github.com/golang/lint/golint - - go get -u github.com/Masterminds/glide - - go get -u github.com/stretchr/testify - - go get -u github.com/GoASTScanner/gas - - glide install - -script: - - grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)" - - test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)" - - test -z "$(golint ./autorest/... | tee /dev/stderr)" - - go vet ./autorest/... - - test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)" - - go build -v ./autorest/... - - go test -v ./autorest/... diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md deleted file mode 100644 index dbfd85fbe0..0000000000 --- a/vendor/github.com/Azure/go-autorest/CHANGELOG.md +++ /dev/null @@ -1,248 +0,0 @@ -# CHANGELOG - -## v9.4.0 - -### New Features - -- Added WaitForCompletion() to Future as a default polling implementation. - -### Bug Fixes - -- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. - -## v9.3.1 - -### Bug Fixes - -- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. - -## v9.3.0 - -### New Features - -- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. -- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). - -## v9.2.0 - -### New Features - -- Added support for custom Azure Stack endpoints. -- Added type azure.Future used to track the status of long-running operations. - -### Bug Fixes - -- Preserve the original error in DoRetryWithRegistration when registration fails. - -## v9.1.1 - -- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. - -## v9.1.0 - -### New Features - -- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. -- Support for loading Azure CLI Authentication files. -- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. - -### Bug Fixes - - - RetriableRequest can now tolerate a ReadSeekable body being read but not reset. - - Adding missing Apache Headers - -## v9.0.0 - -> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes. - -Adding MSI Endpoint Support and CLI token rehydration. - -## v8.3.1 - -Pick up bug fix in adal for MSI support. - -## v8.3.0 - -Updates to Error string formats for clarity.
Also, adding a copy of the http.Response to errors for an improved debugging experience. - -## v8.2.0 - -### New Features - -- Add support for bearer authentication callbacks -- Support 429 response codes that include "Retry-After" header -- Support validation constraint "Pattern" for map keys - -### Bug Fixes - -- Make RetriableRequest work with multiple versions of Go - -## v8.1.1 -Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. - -## v8.1.0 -Adds RetriableRequest type for more efficient handling of retrying HTTP requests. - -## v8.0.0 - -ADAL refactored into its own package. -Support for UNIX time. - -## v7.3.1 -- Version Testing now removed from production bits that are shipped with the library. - -## v7.3.0 -- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations - to acknowledge that they do not need either the entire or a trailing portion - of accepts response body. In doing so, Go's http library can reuse HTTP - connections more readily. -- Adding `PrepareDecorator` to target custom BaseURLs. -- Adding ACR suffix to public cloud environment. -- Updating Glide dependencies. - -## v7.2.5 -- Fixed the Active Directory endpoint for the China cloud. -- Removes UTF-8 BOM if present in response payload. -- Added telemetry. - -## v7.2.3 -- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay - duration. - -## v7.2.2 -- autorest/azure: added ASM and ARM VM DNS suffixes. - -## v7.2.1 -- fixed parsing of UTC times that are not RFC3339 conformant. - -## v7.2.0 -- autorest/validation: Reformat validation error for better error message. - -## v7.1.0 -- preparer: Added support for multipart formdata - WithMultiPartFormdata() -- preparer: Added support for sending file in request body - WithFile -- client: Added RetryDuration parameter. -- autorest/validation: new package for validation code for Azure Go SDK. - -## v7.0.7 -- Add trailing / to endpoint -- azure: add EnvironmentFromName - -## v7.0.6 -- Add retry logic for 408, 500, 502, 503 and 504 status codes. -- Change url path and query encoding logic. -- Fix DelayForBackoff for proper exponential delay. -- Add CookieJar in Client. - -## v7.0.5 -- Add check to start polling only when status is in [200,201,202]. -- Refactoring for unchecked errors. -- azure/persist changes. -- Fix 'file in use' issue in renewing token in deviceflow. -- Store header RetryAfter for subsequent requests in polling. -- Add attribute details in service error. - -## v7.0.4 -- Better error messages for long running operation failures - -## v7.0.3 -- Corrected DoPollForAsynchronous to properly handle the initial response - -## v7.0.2 -- Corrected DoPollForAsynchronous to continue using the polling method first discovered - -## v7.0.1 -- Fixed empty JSON input error in ByUnmarshallingJSON -- Fixed polling support for GET calls -- Changed format name from TimeRfc1123 to TimeRFC1123 - -## v7.0.0 -- Added ByCopying responder with supporting TeeReadCloser -- Rewrote Azure asynchronous handling -- Reverted to only unmarshalling JSON -- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format - -The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since -`encoding/json` successfully deserializes all core types, and extended types normally provide -their custom JSON serialization handlers, the code has been reverted back to using -`json.Unmarshal`. 
The original change to use `json.Decode` was made to reduce duplicate -code; there is no loss of function, and there is a gain in accuracy, by reverting. - -Additionally, Azure services indicate requests to be polled by multiple means. The existing code -only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). -The new code correctly covers all cases and aligns with the other Azure SDKs. - -## v6.1.0 -- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. - -## v6.0.0 -- Completely reworked the handling of polled and asynchronous requests -- Removed unnecessary routines -- Reworked `mocks.Sender` to replay a series of `http.Response` objects -- Added `PrepareDecorators` for primitive types (e.g., bool, int32) - -Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new -`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes` -and `azure.DoPollForAsynchronous` for examples. - -## v5.0.0 -- Added new RespondDecorators unmarshalling primitive types -- Corrected application of inspection and authorization PrependDecorators - -## v4.0.0 -- Added support for Azure long-running operations. -- Added cancellation support to all decorators and functions that may delay. -- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. - -## v3.1.0 -- Add support for OAuth Device Flow authorization. -- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. -- Add helpers for persisting and restoring Tokens. -- Increased code coverage in the github.com/Azure/autorest/azure package - -## v3.0.0 -- Breaking: `NewErrorWithError` no longer takes `statusCode int`. -- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. -- Breaking: `Client#Send()` no longer takes `codes ...int` argument. -- Add: XML unmarshaling support with `ByUnmarshallingXML()` -- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). - Applications using this library should either use Glide or vendor dependencies locally some other way. -- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. -- Fix: use `net/http.DefaultClient` as base client. -- Fix: Missing inspection for polling responses added. -- Add: CopyAndDecode helpers. -- Improved `./autorest/to` with `[]string` helpers. -- Removed golint suppressions in .travis.yml. - -## v2.1.0 - -- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any) - -## v2.0.0 - -- Changed `to.StringMapPtr` method signature to return a pointer -- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys - -## v1.0.0 - -- Added Logging inspectors to trace http.Request / Response -- Added support for User-Agent header -- Changed WithHeader PrepareDecorator to use set vs.
add -- Added JSON to error when unmarshalling fails -- Added Client#Send method -- Corrected case of "Azure" in package paths -- Added "to" helpers, Azure helpers, and improved ease-of-use -- Corrected golint issues - -## v1.0.1 - -- Added CHANGELOG.md - -## v1.1.0 - -- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT -- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate - -## v1.1.1 - -- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile deleted file mode 100644 index a434e73ac4..0000000000 --- a/vendor/github.com/Azure/go-autorest/GNUmakefile +++ /dev/null @@ -1,23 +0,0 @@ -DIR?=./autorest/ - -default: build - -build: fmt - go install $(DIR) - -test: - go test $(DIR) || exit 1 - -vet: - @echo "go vet ." - @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for review."; \ - exit 1; \ - fi - -fmt: - gofmt -w $(DIR) - -.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md deleted file mode 100644 index f4c34d0e68..0000000000 --- a/vendor/github.com/Azure/go-autorest/README.md +++ /dev/null @@ -1,132 +0,0 @@ -# go-autorest - -[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) - -## Usage -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - -```go - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) -``` - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - -```go - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) -``` - -will set the URL to: - -``` - https://microsoft.com/a/b/c -``` - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. 
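To make that sharing guidance concrete, here is a minimal sketch (not part of the original README) of one Preparer built once and reused for every request; `CreatePreparer`, `WithBaseURL` and `WithPath` are the package's own decorator helpers:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Build the Preparer once; its held state (base URL and path) applies to
	// every request it prepares, so it can be shared across goroutines.
	p := autorest.CreatePreparer(
		autorest.WithBaseURL("https://microsoft.com/"),
		autorest.WithPath("a"))

	req, err := p.Prepare(&http.Request{})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL) // https://microsoft.com/a
}
```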
- -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., `ByUnmarshallingJson`) is likely incorrect. - -Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. - -## Helpers - -### Handling Swagger Dates - -The Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct -parsing and formatting. - -### Handling Empty Values - -In JSON, missing values have different semantics than empty values. This is especially true for -services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains -only those values to modify. Missing values are to be left unchanged. Developers, then, require a -means to both specify an empty value and to leave the value out of the submitted JSON. - -The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits -empty values from the rendered JSON. Since Go defines default values for all base types (such as "" -for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package -treats default values as meaning empty, omitting them from the rendered JSON. This means that, using -the Go base types encoded through the default JSON package, it is not possible to create JSON to -clear a value at the server. - -The workaround within the Go community is to use pointers to base types in lieu of base types within -structures that map to JSON. For example, instead of a value of type `string`, the workaround uses -`*string`. While this enables distinguishing empty values from those to be unchanged, creating -pointers to a base type (notably constant, in-line values) requires additional variables. This, for -example, - -```go - s := struct { - S *string - }{ S: &"foo" } -``` -fails, while, this - -```go - v := "foo" - s := struct { - S *string - }{ S: &v } -``` -succeeds. - -To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for -Go base types which have Swagger analogs. It also provides a helper that converts between -`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value -associated with a key should be cleared. With the helpers, the previous example becomes - -```go - s := struct { - S *string - }{ S: to.StringPtr("foo") } -``` - -## Install - -```bash -go get github.com/Azure/go-autorest/autorest -go get github.com/Azure/go-autorest/autorest/azure -go get github.com/Azure/go-autorest/autorest/date -go get github.com/Azure/go-autorest/autorest/to -``` - -## License - -See LICENSE file. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index a17cf98c62..7b0c4bc4d2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -1,19 +1,24 @@ -# Azure Active Directory library for Go +# Azure Active Directory authentication for Go -This project provides a stand alone Azure Active Directory library for Go. The code was extracted -from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for -[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go). +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. -## Installation +## Install -``` +```bash go get -u github.com/Azure/go-autorest/autorest/adal ``` ## Usage -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) follow these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). ### Register an Azure AD Application with secret @@ -218,6 +223,40 @@ if (err == nil) { } ``` +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + ### Command Line Tool A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
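The config.go changes below add `NewOAuthConfigWithAPIVersion`. A minimal sketch of calling it (the endpoint and tenant values here are placeholders, not taken from the diff):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// A nil apiVersion omits the "api-version" query parameter entirely;
	// NewOAuthConfig keeps the old behavior by passing "1.0".
	cfg, err := adal.NewOAuthConfigWithAPIVersion("https://login.microsoftonline.com", "my-tenant-id", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.TokenEndpoint.String())
	// Output: https://login.microsoftonline.com/my-tenant-id/oauth2/token
}
```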
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 49e9214d59..8c83a917ff 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -19,22 +19,48 @@ import ( "net/url" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { - AuthorityEndpoint url.URL - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. +func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil } // NewOAuthConfig returns an OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. +func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + api := "" + // it's legal for tenantID to be empty so don't validate it + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -43,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err if err != nil { return nil, err } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) if err != nil { return nil, err } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) if err != nil { return nil, err } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go deleted file mode 100644 index 304280f664..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go +++ /dev/null @@ 
-1,44 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "testing" -) - -func TestNewOAuthConfig(t *testing.T) { - const testActiveDirectoryEndpoint = "https://login.test.com" - const testTenantID = "tenant-id-test" - - config, err := NewOAuthConfig(testActiveDirectoryEndpoint, testTenantID) - if err != nil { - t.Fatalf("autorest/adal: Unexpected error while creating oauth configuration for tenant: %v.", err) - } - - expected := "https://login.test.com/tenant-id-test/oauth2/authorize?api-version=1.0" - if config.AuthorizeEndpoint.String() != expected { - t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) - } - - expected = "https://login.test.com/tenant-id-test/oauth2/token?api-version=1.0" - if config.TokenEndpoint.String() != expected { - t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint) - } - - expected = "https://login.test.com/tenant-id-test/oauth2/devicecode?api-version=1.0" - if config.DeviceCodeEndpoint.String() != expected { - t.Fatalf("autorest/adal Incorrect devicecode url for Tenant from Environment. expected(%s). actual(%v).", expected, config.DeviceCodeEndpoint) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go deleted file mode 100644 index 6beee161fa..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "testing" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - TestResource = "SomeResource" - TestClientID = "SomeClientID" - TestTenantID = "SomeTenantID" - TestActiveDirectoryEndpoint = "https://login.test.com/" -) - -var ( - testOAuthConfig, _ = NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) - TestOAuthConfig = *testOAuthConfig -) - -const MockDeviceCodeResponse = ` -{ - "device_code": "10000-40-1234567890", - "user_code": "ABCDEF", - "verification_url": "http://aka.ms/deviceauth", - "expires_in": "900", - "interval": "0" -} -` - -const MockDeviceTokenResponse = `{ - "access_token": "accessToken", - "refresh_token": "refreshToken", - "expires_in": "1000", - "expires_on": "2000", - "not_before": "3000", - "resource": "resource", - "token_type": "type" -} -` - -func TestDeviceCodeIncludesResource(t *testing.T) { - sender := mocks.NewSender() - sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) - - code, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) - if err != nil { - t.Fatalf("adal: unexpected error initiating device auth") - } - - if code.Resource != TestResource { - t.Fatalf("adal: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") - } -} - -func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { - sender := mocks.NewSender() - sender.SetError(fmt.Errorf("this is an error")) - - _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) - if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) - } -} - -func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody("doesn't matter") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) - if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { - gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) - sender := mocks.NewSender() - body := mocks.NewBody(gibberishJSON) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - - _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) - if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceCodeReturnsErrorIfEmptyDeviceCode(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody("") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - - _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) - if err != ErrDeviceCodeEmpty { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", ErrDeviceCodeEmpty, err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func deviceCode() 
*DeviceCode { - var deviceCode DeviceCode - _ = json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) - deviceCode.Resource = TestResource - deviceCode.ClientID = TestClientID - return &deviceCode -} - -func TestDeviceTokenReturns(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(MockDeviceTokenResponse) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != nil { - t.Fatalf("adal: got error unexpectedly") - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { - sender := mocks.NewSender() - sender.SetError(fmt.Errorf("this is an error")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) - } -} - -func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody("") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusInternalServerError, "Internal Server Error")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { - gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1) - sender := mocks.NewSender() - body := mocks.NewBody(gibberishJSON) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { - t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func errorDeviceTokenResponse(message string) string { - return `{ "error": "` + message + `" }` -} - -func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := CheckForUserCompletion(sender, deviceCode()) - if err != ErrDeviceAuthorizationPending { - t.Fatalf("!!!") - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := CheckForUserCompletion(sender, deviceCode()) - if err != ErrDeviceSlowDown { - t.Fatalf("!!!") - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -type deviceTokenSender struct { - errorString string - attempts int -} - -func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender { - return &deviceTokenSender{errorString: deviceErrorString, attempts: 0} -} - -func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) 
{ - var resp *http.Response - if s.attempts < 1 { - s.attempts++ - resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString)) - } else { - resp = mocks.NewResponseWithContent(MockDeviceTokenResponse) - } - return resp, nil -} - -// since the above only exercise CheckForUserCompletion, we repeat the test here, -// but with the intent of showing that WaitForUserCompletion loops properly. -func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { - sender := newDeviceTokenSender("authorization_pending") - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != nil { - t.Fatalf("unexpected error occurred") - } -} - -// same as above but with SlowDown now -func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { - sender := newDeviceTokenSender("slow_down") - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != nil { - t.Fatalf("unexpected error occurred") - } -} - -func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != ErrDeviceAccessDenied { - t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != ErrDeviceCodeExpired { - t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err == nil { - t.Fatalf("failed to get error") - } - if err != ErrDeviceGeneric { - t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} - -func TestDeviceTokenReturnsErrorIfTokenEmptyAndStatusOK(t *testing.T) { - sender := mocks.NewSender() - body := mocks.NewBody("") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - - _, err := WaitForUserCompletion(sender, deviceCode()) - if err != ErrOAuthTokenEmpty { - t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrOAuthTokenEmpty.Error(), err.Error()) - } - - if body.IsOpen() { - t.Fatalf("response body was left open!") - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go deleted file mode 100644 index 5e02d52ac2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); 
-// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// msiPath is the path to the MSI Extension settings file (to discover the endpoint) -var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go deleted file mode 100644 index 261b568829..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "os" - "strings" -) - -// msiPath is the path to the MSI Extension settings file (to discover the endpoint) -var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go deleted file mode 100644 index a9c287c6dd..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "reflect" - "runtime" - "strings" - "testing" -) - -const MockTokenJSON string = `{ - "access_token": "accessToken", - "refresh_token": "refreshToken", - "expires_in": "1000", - "expires_on": "2000", - "not_before": "3000", - "resource": "resource", - "token_type": "type" -}` - -var TestToken = Token{ - AccessToken: "accessToken", - RefreshToken: "refreshToken", - ExpiresIn: "1000", - ExpiresOn: "2000", - NotBefore: "3000", - Resource: "resource", - Type: "type", -} - -func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File { - f, err := ioutil.TempFile(os.TempDir(), suffix) - if err != nil { - t.Fatalf("azure: unexpected error when creating temp file: %v", err) - } - defer f.Close() - - _, err = f.Write([]byte(contents)) - if err != nil { - t.Fatalf("azure: unexpected error when writing temp test file: %v", err) - } - - return f -} - -func TestLoadToken(t *testing.T) { - f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON) - defer os.Remove(f.Name()) - - expectedToken := TestToken - actualToken, err := LoadToken(f.Name()) - if err != nil { - t.Fatalf("azure: unexpected error loading token from file: %v", err) - } - - if *actualToken != expectedToken { - t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken) - } - - // test that LoadToken closes the file properly - err = SaveToken(f.Name(), 0600, *actualToken) - if err != nil { - t.Fatalf("azure: could not save token after LoadToken: %v", err) - } -} - -func TestLoadTokenFailsBadPath(t *testing.T) { - _, err := LoadToken("/tmp/this_file_should_never_exist_really") - expectedSubstring := "failed to open file" - if err == nil || !strings.Contains(err.Error(), expectedSubstring) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) - } -} - -func TestLoadTokenFailsBadJson(t *testing.T) { - gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1) - f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON) - defer os.Remove(f.Name()) - - _, err := LoadToken(f.Name()) - expectedSubstring := "failed to decode contents of file" - if err == nil || !strings.Contains(err.Error(), expectedSubstring) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) - } -} - -func token() *Token { - var token Token - json.Unmarshal([]byte(MockTokenJSON), &token) - return &token -} - -func TestSaveToken(t *testing.T) { - f, err := ioutil.TempFile("", "testloadtoken") - if err != nil { - t.Fatalf("azure: unexpected error when creating temp file: %v", err) - } - defer os.Remove(f.Name()) - f.Close() - - mode := os.ModePerm & 0642 - err = SaveToken(f.Name(), mode, *token()) - if err != nil { - t.Fatalf("azure: unexpected error saving token to file: %v", err) - } - fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh - if err != nil { - t.Fatalf("azure: stat failed: %v", err) - } - if runtime.GOOS != "windows" { // permissions don't work on Windows - if perm := fi.Mode().Perm(); perm != mode { - t.Fatalf("azure: wrong file perm. 
got:%s; expected:%s file :%s", perm, mode, f.Name()) - } - } - - var actualToken Token - var expectedToken Token - - json.Unmarshal([]byte(MockTokenJSON), expectedToken) - - contents, err := ioutil.ReadFile(f.Name()) - if err != nil { - t.Fatal("!!") - } - json.Unmarshal(contents, actualToken) - - if !reflect.DeepEqual(actualToken, expectedToken) { - t.Fatal("azure: token was not serialized correctly") - } -} - -func TestSaveTokenFailsNoPermission(t *testing.T) { - pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall" - if runtime.GOOS == "windows" { - pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken") - } - err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token()) - expectedSubstring := "failed to create directory" - if err == nil || !strings.Contains(err.Error(), expectedSubstring) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) - } -} - -func TestSaveTokenFailsCantCreate(t *testing.T) { - tokenPath := "/thiswontwork" - if runtime.GOOS == "windows" { - tokenPath = path.Join(os.Getenv("windir"), "system32") - } - err := SaveToken(tokenPath, 0644, *token()) - expectedSubstring := "failed to create the temp file to write the token" - if err == nil || !strings.Contains(err.Error(), expectedSubstring) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 0e5ad14d39..834401e00d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. type SendDecorator func(Sender) Sender diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 67dd97a18c..effa87ab2f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -15,21 +15,26 @@ package adal // limitations under the License. 
import ( + "context" "crypto/rand" "crypto/rsa" "crypto/sha1" "crypto/x509" "encoding/base64" "encoding/json" + "errors" "fmt" "io/ioutil" + "math" + "net" "net/http" "net/url" - "strconv" "strings" + "sync" "time" "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/tracing" "github.com/dgrijalva/jwt-go" ) @@ -42,11 +47,23 @@ const ( // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows OAuthGrantTypeClientCredentials = "client_credentials" + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + // metadataHeader is the header required by MSI extension metadataHeader = "Metadata" + + // msiEndpoint is the well-known endpoint for getting MSI authentication tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // the default number of attempts to refresh an MSI authentication token + defaultMaxMSIRefreshAttempts = 5 ) // OAuthTokenProvider is an interface which should be implemented by an access token retriever @@ -54,6 +71,12 @@ type OAuthTokenProvider interface { OAuthToken() string } +// TokenRefreshError is an interface used by errors returned during token refresh. +type TokenRefreshError interface { + error + Response() *http.Response +} + // Refresher is an interface for token refresh functionality type Refresher interface { Refresh() error @@ -61,31 +84,52 @@ type Refresher interface { EnsureFresh() error } +// RefresherWithContext is an interface for context-aware token refresh functionality +type RefresherWithContext interface { + RefreshWithContext(ctx context.Context) error + RefreshExchangeWithContext(ctx context.Context, resource string) error + EnsureFreshWithContext(ctx context.Context) error +} + // TokenRefreshCallback is the type representing callbacks that will be called after // a successful token refresh type TokenRefreshCallback func(Token) error // Token encapsulates the access token used to authorize Azure requests. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` Type string `json:"token_type"` } +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + // Expires returns the time.Time when the Token expires. 
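// Illustrative sketch (not part of the vendored patch): why the Token expiry fields
// above switched from string to json.Number. Some token endpoints return these fields
// as quoted strings while others return bare numbers; json.Number decodes both forms,
// where a plain string field would reject one of them. The payloads below are made up.
package main

import (
	"encoding/json"
	"fmt"
)

// token mirrors only the expiry-related fields of adal.Token shown above.
type token struct {
	ExpiresIn json.Number `json:"expires_in"`
	ExpiresOn json.Number `json:"expires_on"`
}

func main() {
	for _, body := range []string{
		`{"expires_in":"3600","expires_on":"1500000000"}`, // quoted-string form
		`{"expires_in":3600,"expires_on":1500000000}`,     // bare-number form
	} {
		var tk token
		if err := json.Unmarshal([]byte(body), &tk); err != nil {
			fmt.Println("unmarshal failed:", err)
			continue
		}
		// Token.Expires() performs the same conversion via ExpiresOn.Float64().
		secs, _ := tk.ExpiresOn.Float64()
		fmt.Printf("expires_in=%s expires_on=%.0f\n", tk.ExpiresIn, secs)
	}
}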
func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) + s, err := t.ExpiresOn.Float64() if err != nil { s = -3600 } - expiration := date.NewUnixTimeFromSeconds(float64(s)) + expiration := date.NewUnixTimeFromSeconds(s) return time.Time(expiration).UTC() } @@ -106,6 +150,12 @@ func (t *Token) OAuthToken() string { return t.AccessToken } +// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + // ServicePrincipalNoSecret represents a secret type that contains no secret // meaning it is not valid for fetching a fresh token. This is used by Manual type ServicePrincipalNoSecret struct { @@ -117,15 +167,19 @@ func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePr return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") } -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +// MarshalJSON implements the json.Marshaler interface. +func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) } // ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. type ServicePrincipalTokenSecret struct { - ClientSecret string + ClientSecret string `json:"value"` } // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. @@ -135,21 +189,24 @@ func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *Ser return nil } +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + // ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. type ServicePrincipalCertificateSecret struct { Certificate *x509.Certificate PrivateKey *rsa.PrivateKey } -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - // SignJwt returns the JWT signed with the certificate's private key. 
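// Illustrative sketch (not part of the vendored patch): the type-discriminator pattern
// used by the MarshalJSON implementations above. Each secret serializes itself with a
// "type" tag so ServicePrincipalToken.UnmarshalJSON (further below) can choose the
// matching concrete type when rehydrating. The stand-in type here is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

// fakeSecret stands in for ServicePrincipalTokenSecret.
type fakeSecret struct {
	ClientSecret string
}

// MarshalJSON emits the secret together with a type tag, mirroring the pattern above.
func (s fakeSecret) MarshalJSON() ([]byte, error) {
	type tagged struct {
		Type  string `json:"type"`
		Value string `json:"value"`
	}
	return json.Marshal(tagged{Type: "ServicePrincipalTokenSecret", Value: s.ClientSecret})
}

func main() {
	b, _ := json.Marshal(fakeSecret{ClientSecret: "s3cr3t"})
	fmt.Println(string(b)) // {"type":"ServicePrincipalTokenSecret","value":"s3cr3t"}

	// A deserializer sniffs the tag first, then unmarshals into the concrete type.
	var raw map[string]interface{}
	_ = json.Unmarshal(b, &raw)
	fmt.Println("discriminator:", raw["type"])
}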
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { hasher := sha1.New() @@ -169,10 +226,12 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c token.Claims = jwt.MapClaims{ - "aud": spt.oauthConfig.TokenEndpoint.String(), - "iss": spt.clientID, - "sub": spt.clientID, + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, "jti": base64.URLEncoding.EncodeToString(jti), "nbf": time.Now().Unix(), "exp": time.Now().Add(time.Hour * 24).Unix(), @@ -195,31 +254,191 @@ func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *Se return nil } +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + // ServicePrincipalToken encapsulates a Token created for a Service Principal. type ServicePrincipalToken struct { - Token + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} - secret ServicePrincipalSecret - oauthConfig OAuthConfig - clientID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender Sender +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} - refreshCallbacks []TokenRefreshCallback +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { + // need to determine the token type + raw := map[string]interface{}{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + secret := raw["secret"].(map[string]interface{}) + switch secret["type"] { + case "ServicePrincipalNoSecret": + spt.inner.Secret = &ServicePrincipalNoSecret{} + case "ServicePrincipalTokenSecret": + spt.inner.Secret = &ServicePrincipalTokenSecret{} + case "ServicePrincipalCertificateSecret": + return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") + case "ServicePrincipalMSISecret": + return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") + case "ServicePrincipalUsernamePasswordSecret": + spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} + case "ServicePrincipalAuthorizationCodeSecret": + spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + default: + return fmt.Errorf("unrecognized token type '%s'", secret["type"]) + } + err = json.Unmarshal(data, &spt.inner) + if err != nil { + return err + } + // Don't override the refreshLock or the sender if those have been already set. 
+ if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = &http.Client{Transport: tracing.Transport} + } + return nil +} + +// internal type used for marshalling/unmarshalling +type servicePrincipalToken struct { + Token Token `json:"token"` + Secret ServicePrincipalSecret `json:"secret"` + OauthConfig OAuthConfig `json:"oauth"` + ClientID string `json:"clientID"` + Resource string `json:"resource"` + AutoRefresh bool `json:"autoRefresh"` + RefreshWithin time.Duration `json:"refreshWithin"` +} + +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil } // NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, - secret: secret, - clientID: id, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: &http.Client{Transport: tracing.Transport}, refreshCallbacks: callbacks, } return spt, nil @@ -227,6 +446,18 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso // NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } spt, err := NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -237,7 +468,39 @@ func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID s return nil, err } - spt.Token = token + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, 
fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token return spt, nil } @@ -245,6 +508,18 @@ func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID s // NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal // credentials scoped to the named resource. func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -256,8 +531,23 @@ func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret s ) } -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -270,59 +560,173 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return getMSIVMEndpoint(msiPath) +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
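// Hypothetical usage sketch (not part of the vendored patch) of the fail-fast parameter
// validation added to the constructors above; the endpoint, tenant, client, and resource
// values are placeholders.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		fmt.Println(err)
		return
	}

	// An empty clientID is now rejected up front instead of yielding a broken token source.
	if _, err := adal.NewServicePrincipalToken(*cfg, "", "client-secret", "https://management.azure.com/"); err != nil {
		fmt.Println("validation error:", err)
	}

	// With all parameters supplied, construction succeeds; Refresh would then hit AAD.
	spt, err := adal.NewServicePrincipalToken(*cfg, "client-id", "client-secret", "https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	_ = spt
}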
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(username, "username"); err != nil { + return nil, err + } + if err := validateStringParam(password, "password"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalUsernamePasswordSecret{ + Username: username, + Password: password, + }, + callbacks..., + ) } -func getMSIVMEndpoint(path string) (string, error) { - // Read MSI settings - bytes, err := ioutil.ReadFile(path) - if err != nil { - return "", err +// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code. +func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err } - msiSettings := struct { - URL string `json:"url"` - }{} - err = json.Unmarshal(bytes, &msiSettings) - if err != nil { - return "", err + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(clientSecret, "clientSecret"); err != nil { + return nil, err } + if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { + return nil, err + } + if err := validateStringParam(redirectURI, "redirectURI"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: clientSecret, + AuthorizationCode: authorizationCode, + RedirectURI: redirectURI, + }, + callbacks..., + ) +} - return msiSettings.URL, nil +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil } // NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system-assigned identity when creating the token. func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the specified user-assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
+} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } // We set the oauth config token endpoint to be MSI's endpoint msiEndpointURL, err := url.Parse(msiEndpoint) if err != nil { return nil, err } - oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") - if err != nil { - return nil, err + v := url.Values{} + v.Set("resource", resource) + v.Set("api-version", "2018-02-01") + if userAssignedID != nil { + v.Set("client_id", *userAssignedID) } + msiEndpointURL.RawQuery = v.Encode() spt := &ServicePrincipalToken{ - oauthConfig: *oauthConfig, - secret: &ServicePrincipalMSISecret{}, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - refreshCallbacks: callbacks, + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{}, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: &http.Client{Transport: tracing.Transport}, + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + if userAssignedID != nil { + spt.inner.ClientID = *userAssignedID } return spt, nil } +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + // EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { - return spt.Refresh() + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
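// Hypothetical usage sketch (not part of the vendored patch) of the MSI constructors
// above; the resource URI and client ID are placeholders, and the calls only succeed on
// an Azure VM where the well-known IMDS endpoint is reachable.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	msiEndpoint, _ := adal.GetMSIVMEndpoint() // now simply returns the static IMDS URL

	// System-assigned identity.
	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, "https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}

	// User-assigned identity; the ID is forwarded as client_id on the query string.
	sptUser, err := adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(
		msiEndpoint, "https://management.azure.com/", "00000000-0000-0000-0000-000000000000")
	if err != nil {
		fmt.Println(err)
		return
	}
	_, _ = spt, sptUser // call EnsureFresh/OAuthToken on the VM to obtain tokens
}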
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } } return nil } @@ -331,7 +735,7 @@ func (spt *ServicePrincipalToken) EnsureFresh() error { func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { if spt.refreshCallbacks != nil { for _, callback := range spt.refreshCallbacks { - err := callback(spt.Token) + err := callback(spt.inner.Token) if err != nil { return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) } @@ -341,46 +745,103 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { } // Refresh obtains a fresh token for the Service Principal. +// This method is not safe for concurrent use and should be synchronized. func (spt *ServicePrincipalToken) Refresh() error { - return spt.refreshInternal(spt.resource) + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is not safe for concurrent use and should be synchronized. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) } // RefreshExchange refreshes the token, but for a different resource. +// This method is not safe for concurrent use and should be synchronized. func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.refreshInternal(resource) + return spt.RefreshExchangeWithContext(context.Background(), resource) } -func (spt *ServicePrincipalToken) refreshInternal(resource string) error { - v := url.Values{} - v.Set("client_id", spt.clientID) - v.Set("resource", resource) +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is not safe for concurrent use and should be synchronized. 
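// Hypothetical sketch (not part of the vendored patch) combining the context-aware
// refresh methods above with the TokenRefreshError interface introduced earlier; spt is
// assumed to be an already-initialized *adal.ServicePrincipalToken.
package sketch

import (
	"context"
	"log"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func refreshWithTimeout(spt *adal.ServicePrincipalToken) {
	// The context bounds the HTTP round trip performed during the refresh.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := spt.RefreshWithContext(ctx); err != nil {
		// Failed refreshes surface as TokenRefreshError; the raw *http.Response is
		// available for inspection (it is nil when the request itself never completed).
		if tre, ok := err.(adal.TokenRefreshError); ok && tre.Response() != nil {
			log.Printf("refresh rejected with status %d", tre.Response().StatusCode)
			return
		}
		log.Printf("refresh failed: %v", err)
	}
}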
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} - if spt.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.RefreshToken) - } else { - v.Set("grant_type", OAuthGrantTypeClientCredentials) - err := spt.secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func isIMDS(u url.URL) bool { + imds, err := url.Parse(msiEndpoint) + if err != nil { + return false } + return u.Host == imds.Host && u.Path == imds.Path +} - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) } + req.Header.Add("User-Agent", UserAgent()) + req = req.WithContext(ctx) + if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet req.Header.Set(metadataHeader, "true") } - resp, err := spt.sender.Do(req) + + var resp *http.Response + if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + resp, err = spt.sender.Do(req) + } if err != nil { - return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil) } defer resp.Body.Close() @@ -388,11 +849,15 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { if resp.StatusCode != http.StatusOK { if err != nil { - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. 
Failed reading response body", resp.StatusCode) + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) } - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) } + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + if err != nil { return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) } @@ -405,23 +870,116 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) } - spt.Token = token + spt.inner.Token = token return spt.InvokeRefreshCallbacks(token) } +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // retry on temporary network errors, e.g. transient network failures. + // if we don't receive a response then assume we can't connect to the + // endpoint so we're likely not running on an Azure VM so don't retry. + if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +// returns true if the specified error is a temporary network error or false if it's not. +// if the error doesn't implement the net.Error interface the return value is true. 
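// Standalone sketch (not part of the vendored patch) of the cumulative exponential
// backoff computed by retryForIMDS above: each attempt adds 2^attempt seconds of
// "delta backoff", with the total wait capped at 60 seconds.
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const maxDelay = 60 * time.Second
	delay := time.Duration(0)
	for attempt := 1; attempt <= 5; attempt++ { // defaultMaxMSIRefreshAttempts is 5
		delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
		if delay > maxDelay {
			delay = maxDelay
		}
		fmt.Printf("attempt %d: wait %v before retrying\n", attempt, delay)
	}
	// Prints 2s, 6s, 14s, 30s, then 60s (the raw 62s hits the 60s ceiling).
}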
+func isTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// returns true if slice ints contains the value n +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + // SetAutoRefresh enables or disables automatic refreshing of stale tokens. func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.autoRefresh = autoRefresh + spt.inner.AutoRefresh = autoRefresh } // SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will // refresh the token. func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.refreshWithin = d + spt.inner.RefreshWithin = d return } // SetSender sets the http.Client used when obtaining the Service Principal token. An // undecorated http.Client is used by default. func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go deleted file mode 100644 index 73c848ba00..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go +++ /dev/null @@ -1,654 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
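// Hypothetical sketch (not part of the vendored patch) of why the OAuthToken and Token
// accessors above take the read lock: with EnsureFresh now safe for concurrent use, a
// single token source can be shared across goroutines; spt is assumed to be an
// already-initialized *adal.ServicePrincipalToken.
package sketch

import (
	"log"
	"sync"

	"github.com/Azure/go-autorest/autorest/adal"
)

func fanOut(spt *adal.ServicePrincipalToken) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// EnsureFresh double-checks expiry under the write lock, so at most one
			// goroutine performs the actual refresh.
			if err := spt.EnsureFresh(); err != nil {
				log.Printf("refresh failed: %v", err)
				return
			}
			_ = spt.OAuthToken() // read is guarded by the internal RWMutex
		}()
	}
	wg.Wait()
}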
- -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "fmt" - "io/ioutil" - "math/big" - "net/http" - "net/url" - "os" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource" - defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource" -) - -func TestTokenExpires(t *testing.T) { - tt := time.Now().Add(5 * time.Second) - tk := newTokenExpiresAt(tt) - - if tk.Expires().Equal(tt) { - t.Fatalf("adal: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) - } -} - -func TestTokenIsExpired(t *testing.T) { - tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) - - if !tk.IsExpired() { - t.Fatalf("adal: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", - time.Now().UTC(), tk.Expires()) - } -} - -func TestTokenIsExpiredUninitialized(t *testing.T) { - tk := &Token{} - - if !tk.IsExpired() { - t.Fatalf("adal: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) - } -} - -func TestTokenIsNoExpired(t *testing.T) { - tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) - - if tk.IsExpired() { - t.Fatalf("adal: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) - } -} - -func TestTokenWillExpireIn(t *testing.T) { - d := 5 * time.Second - tk := newTokenExpiresIn(d) - - if !tk.WillExpireIn(d) { - t.Fatal("adal: Token#WillExpireIn mismeasured expiration time") - } -} - -func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { - spt := newServicePrincipalToken() - - if !spt.autoRefresh { - t.Fatal("adal: ServicePrincipalToken did not default to automatic token refreshing") - } - - spt.SetAutoRefresh(false) - if spt.autoRefresh { - t.Fatal("adal: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") - } -} - -func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { - spt := newServicePrincipalToken() - - if spt.refreshWithin != defaultRefresh { - t.Fatal("adal: ServicePrincipalToken did not correctly set the default refresh interval") - } - - spt.SetRefreshWithin(2 * defaultRefresh) - if spt.refreshWithin != 2*defaultRefresh { - t.Fatal("adal: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") - } -} - -func TestServicePrincipalTokenSetSender(t *testing.T) { - spt := newServicePrincipalToken() - - c := &http.Client{} - spt.SetSender(c) - if !reflect.DeepEqual(c, spt.sender) { - t.Fatal("adal: ServicePrincipalToken#SetSender did not set the sender") - } -} - -func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { - spt := newServicePrincipalToken() - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.Method != "POST" { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) - } - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an 
unexpected error (%v)", err) - } - - if body.IsOpen() { - t.Fatalf("the response was not closed!") - } -} - -func TestServicePrincipalTokenFromMSIRefreshUsesPOST(t *testing.T) { - resource := "https://resource" - cb := func(token Token) error { return nil } - - spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb) - if err != nil { - t.Fatalf("Failed to get MSI SPT: %v", err) - } - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.Method != "POST" { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) - } - if h := r.Header.Get("Metadata"); h != "true" { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Metadata header for MSI") - } - return resp, nil - }) - } - })()) - spt.SetSender(s) - err = spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } - - if body.IsOpen() { - t.Fatalf("the response was not closed!") - } -} - -func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { - spt := newServicePrincipalToken() - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", - "application/x-form-urlencoded", - r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) - } - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } -} - -func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { - spt := newServicePrincipalToken() - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", - TestOAuthConfig.TokenEndpoint, r.URL) - } - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } -} - -func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("adal: Failed to read body of Service Principal 
token request (%v)", err) - } - f(t, b) - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } -} - -func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) { - sptManual := newServicePrincipalTokenManual() - testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { - if string(b) != defaultManualFormData { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", - defaultManualFormData, string(b)) - } - }) -} - -func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { - sptCert := newServicePrincipalTokenCertificate(t) - testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) { - body := string(b) - - values, _ := url.ParseQuery(body) - if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" || - values["client_id"][0] != "id" || - values["grant_type"][0] != "client_credentials" || - values["resource"][0] != "resource" { - t.Fatalf("adal: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") - } - }) -} - -func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { - spt := newServicePrincipalToken() - testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { - if string(b) != defaultFormData { - t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", - defaultFormData, string(b)) - } - - }) -} - -func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { - spt := newServicePrincipalToken() - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } - if resp.Body.(*mocks.Body).IsOpen() { - t.Fatal("adal: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") - } -} - -func TestServicePrincipalTokenRefreshRejectsResponsesWithStatusNotOK(t *testing.T) { - spt := newServicePrincipalToken() - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusUnauthorized, "Unauthorized") - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err == nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh should reject a response with status != %d", http.StatusOK) - } -} - -func TestServicePrincipalTokenRefreshRejectsEmptyBody(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - err := spt.Refresh() - if err == nil { - t.Fatal("adal: ServicePrincipalToken#Refresh should reject an empty token") - } -} - 
-func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - c.SetError(fmt.Errorf("Faux Error")) - spt.SetSender(c) - - err := spt.Refresh() - if err == nil { - t.Fatal("adal: Failed to propagate the request error") - } -} - -func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", http.StatusUnauthorized)) - spt.SetSender(c) - - err := spt.Refresh() - if err == nil { - t.Fatalf("adal: Failed to return an when receiving a status code other than HTTP %d", http.StatusOK) - } -} - -func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { - spt := newServicePrincipalToken() - - expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) - j := newTokenJSON(expiresOn, "resource") - resp := mocks.NewResponseWithContent(j) - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - })()) - spt.SetSender(s) - - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } else if spt.AccessToken != "accessToken" || - spt.ExpiresIn != "3600" || - spt.ExpiresOn != expiresOn || - spt.NotBefore != expiresOn || - spt.Resource != "resource" || - spt.Type != "Bearer" { - t.Fatalf("adal: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", - j, *spt) - } -} - -func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { - spt := newServicePrincipalToken() - expireToken(&spt.Token) - - body := mocks.NewBody(newTokenJSON("test", "test")) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - - f := false - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return resp, nil - }) - } - })()) - spt.SetSender(s) - err := spt.EnsureFresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) - } - if !f { - t.Fatal("adal: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") - } -} - -func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { - spt := newServicePrincipalToken() - setTokenToExpireIn(&spt.Token, 1000*time.Second) - - f := false - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - err := spt.EnsureFresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) - } - if f { - t.Fatal("adal: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") - } -} - -func TestRefreshCallback(t *testing.T) { - callbackTriggered := false - spt := newServicePrincipalToken(func(Token) error { - callbackTriggered = true - return nil - }) - - expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) - - sender := mocks.NewSender() - j := newTokenJSON(expiresOn, "resource") - sender.AppendResponse(mocks.NewResponseWithContent(j)) - 
spt.SetSender(sender) - err := spt.Refresh() - if err != nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } - if !callbackTriggered { - t.Fatalf("adal: RefreshCallback failed to trigger call callback") - } -} - -func TestRefreshCallbackErrorPropagates(t *testing.T) { - errorText := "this is an error text" - spt := newServicePrincipalToken(func(Token) error { - return fmt.Errorf(errorText) - }) - - expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) - - sender := mocks.NewSender() - j := newTokenJSON(expiresOn, "resource") - sender.AppendResponse(mocks.NewResponseWithContent(j)) - spt.SetSender(sender) - err := spt.Refresh() - - if err == nil || !strings.Contains(err.Error(), errorText) { - t.Fatalf("adal: RefreshCallback failed to propagate error") - } -} - -// This demonstrates the danger of manual token without a refresh token -func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) { - spt := newServicePrincipalTokenManual() - spt.RefreshToken = "" - err := spt.Refresh() - if err == nil { - t.Fatalf("adal: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") - } -} - -func TestNewServicePrincipalTokenFromMSI(t *testing.T) { - resource := "https://resource" - cb := func(token Token) error { return nil } - - spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb) - if err != nil { - t.Fatalf("Failed to get MSI SPT: %v", err) - } - - // check some of the SPT fields - if _, ok := spt.secret.(*ServicePrincipalMSISecret); !ok { - t.Fatal("SPT secret was not of MSI type") - } - - if spt.resource != resource { - t.Fatal("SPT came back with incorrect resource") - } - - if len(spt.refreshCallbacks) != 1 { - t.Fatal("SPT had incorrect refresh callbacks.") - } -} - -func TestGetVMEndpoint(t *testing.T) { - tempSettingsFile, err := ioutil.TempFile("", "ManagedIdentity-Settings") - if err != nil { - t.Fatal("Couldn't write temp settings file") - } - defer os.Remove(tempSettingsFile.Name()) - - settingsContents := []byte(`{ - "url": "http://msiendpoint/" - }`) - - if _, err := tempSettingsFile.Write(settingsContents); err != nil { - t.Fatal("Couldn't fill temp settings file") - } - - endpoint, err := getMSIVMEndpoint(tempSettingsFile.Name()) - if err != nil { - t.Fatal("Coudn't get VM endpoint") - } - - if endpoint != "http://msiendpoint/" { - t.Fatal("Didn't get correct endpoint") - } -} - -func newToken() *Token { - return &Token{ - AccessToken: "ASECRETVALUE", - Resource: "https://azure.microsoft.com/", - Type: "Bearer", - } -} - -func newTokenJSON(expiresOn string, resource string) string { - return fmt.Sprintf(`{ - "access_token" : "accessToken", - "expires_in" : "3600", - "expires_on" : "%s", - "not_before" : "%s", - "resource" : "%s", - "token_type" : "Bearer" - }`, - expiresOn, expiresOn, resource) -} - -func newTokenExpiresIn(expireIn time.Duration) *Token { - return setTokenToExpireIn(newToken(), expireIn) -} - -func newTokenExpiresAt(expireAt time.Time) *Token { - return setTokenToExpireAt(newToken(), expireAt) -} - -func expireToken(t *Token) *Token { - return setTokenToExpireIn(t, 0) -} - -func setTokenToExpireAt(t *Token, expireAt time.Time) *Token { - t.ExpiresIn = "3600" - t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(date.UnixEpoch()).Seconds())) - t.NotBefore = t.ExpiresOn - return t -} - -func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token { - return setTokenToExpireAt(t, 
time.Now().Add(expireIn)) -} - -func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken { - spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...) - return spt -} - -func newServicePrincipalTokenManual() *ServicePrincipalToken { - token := newToken() - token.RefreshToken = "refreshtoken" - spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token) - return spt -} - -func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken { - template := x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{CommonName: "test"}, - BasicConstraintsValid: true, - } - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) - if err != nil { - t.Fatal(err) - } - certificate, err := x509.ParseCertificate(certificateBytes) - if err != nil { - t.Fatal(err) - } - - spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource") - return spt -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 0000000000..c867b34843 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 71e3ced2d6..2e24b4b397 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -15,18 +15,25 @@ package autorest // limitations under the License. 
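A quick usage sketch for the version.go API added above (runnable on its own; the extension string is illustrative):

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // e.g. "Go/go1.12 (amd64-linux) go-autorest/adal/v1.0.0"
        fmt.Println(adal.UserAgent())
        // AddToUserAgent appends an extension; an empty extension is an error.
        if err := adal.AddToUserAgent("ingress-nginx-e2e"); err != nil {
            fmt.Println(err)
        }
        fmt.Println(adal.UserAgent())
    }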
import ( + "encoding/base64" "fmt" "net/http" "net/url" "strings" "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/tracing" ) const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" ) // Authorizer is the interface that provides a PrepareDecorator used to supply request @@ -44,6 +51,53 @@ func (na NullAuthorizer) WithAuthorization() PrepareDecorator { return WithNothing() } +// APIKeyAuthorizer implements API Key authorization. +type APIKeyAuthorizer struct { + headers map[string]interface{} + queryParameters map[string]interface{} +} + +// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(headers, nil) +} + +// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. +func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(nil, queryParameters) +} + +// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { + return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. +func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) + } +} + +// CognitiveServicesAuthorizer implements authorization for Cognitive Services. +type CognitiveServicesAuthorizer struct { + subscriptionKey string +} + +// NewCognitiveServicesAuthorizer is +func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { + return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} +} + +// WithAuthorization is +func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[apiKeyAuthorizerHeader] = csa.subscriptionKey + headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + // BearerAuthorizer implements the bearer authorization type BearerAuthorizer struct { tokenProvider adal.OAuthTokenProvider @@ -54,10 +108,6 @@ func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { return &BearerAuthorizer{tokenProvider: tp} } -func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())) -} - // WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose // value is "Bearer " followed by the token. 
// @@ -65,15 +115,25 @@ func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { return func(p Preparer) Preparer { return PreparerFunc(func(r *http.Request) (*http.Request, error) { - refresher, ok := ba.tokenProvider.(adal.Refresher) - if ok { - err := refresher.EnsureFresh() + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } if err != nil { - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, "Failed to refresh the Token for request to %s", r.URL) } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) } - return (ba.withBearerAuthorization()(p)).Prepare(r) + return r, err }) } } @@ -91,7 +151,7 @@ type BearerAuthorizerCallback struct { // is invoked when the HTTP request is submitted. func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { if sender == nil { - sender = &http.Client{} + sender = &http.Client{Transport: tracing.Transport} } return &BearerAuthorizerCallback{sender: sender, callback: callback} } @@ -103,25 +163,28 @@ func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbac func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { return func(p Preparer) Preparer { return PreparerFunc(func(r *http.Request) (*http.Request, error) { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. - rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) if err != nil { return r, err } - return ba.WithAuthorization()(p).Prepare(r) + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } } } } @@ -179,3 +242,46 @@ func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { return bc, err } + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. 
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic <token>" where <token> is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. +func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple. +func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go deleted file mode 100644 index 635fcfd7f8..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
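A minimal sketch (not part of this patch) of the new BasicAuthorizer in use, in the same Prepare-based style as the authorization tests removed below; the URL and credentials are illustrative:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        ba := autorest.NewBasicAuthorizer("user", "secret")
        req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
        // WithAuthorization funnels through APIKeyAuthorizer and sets
        // "Authorization: Basic <base64(user:secret)>" on the request.
        req, err := autorest.Prepare(req, ba.WithAuthorization())
        if err != nil {
            panic(err)
        }
        fmt.Println(req.Header.Get("Authorization")) // Basic dXNlcjpzZWNyZXQ=
    }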
- -import ( - "fmt" - "net/http" - "reflect" - "testing" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - TestTenantID = "TestTenantID" - TestActiveDirectoryEndpoint = "https://login/test.com/" -) - -func TestWithAuthorizer(t *testing.T) { - r1 := mocks.NewRequest() - - na := &NullAuthorizer{} - r2, err := Prepare(r1, - na.WithAuthorization()) - if err != nil { - t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) - } else if !reflect.DeepEqual(r1, r2) { - t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) - } -} - -func TestTokenWithAuthorization(t *testing.T) { - token := &adal.Token{ - AccessToken: "TestToken", - Resource: "https://azure.microsoft.com/", - Type: "Bearer", - } - - ba := NewBearerAuthorizer(token) - req, err := Prepare(&http.Request{}, ba.WithAuthorization()) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", token.AccessToken) { - t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") - } -} - -func TestServicePrincipalTokenWithAuthorizationNoRefresh(t *testing.T) { - oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - spt.SetAutoRefresh(false) - s := mocks.NewSender() - spt.SetSender(s) - - ba := NewBearerAuthorizer(spt) - req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { - t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") - } -} - -func TestServicePrincipalTokenWithAuthorizationRefresh(t *testing.T) { - - oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - refreshed := false - spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", func(t adal.Token) error { - refreshed = true - return nil - }) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - - jwt := `{ - "access_token" : "accessToken", - "expires_in" : "3600", - "expires_on" : "test", - "not_before" : "test", - "resource" : "test", - "token_type" : "Bearer" - }` - body := mocks.NewBody(jwt) - resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") - c := mocks.NewSender() - s := DecorateSender(c, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - })()) - spt.SetSender(s) - - ba := NewBearerAuthorizer(spt) - req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } else if 
req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { - t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") - } - - if !refreshed { - t.Fatal("azure: BearerAuthorizer#WithAuthorization must refresh the token") - } -} - -func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfConnotRefresh(t *testing.T) { - oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) - if err != nil { - t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) - } - - s := mocks.NewSender() - s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest)) - spt.SetSender(s) - - ba := NewBearerAuthorizer(spt) - _, err = Prepare(mocks.NewRequest(), ba.WithAuthorization()) - if err == nil { - t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to return an error when refresh fails") - } -} - -func TestBearerAuthorizerCallback(t *testing.T) { - tenantString := "123-tenantID-456" - resourceString := "https://fake.resource.net" - - s := mocks.NewSender() - resp := mocks.NewResponseWithStatus("401 Unauthorized", http.StatusUnauthorized) - mocks.SetResponseHeader(resp, bearerChallengeHeader, bearer+" \"authorization\"=\"https://fake.net/"+tenantString+"\",\"resource\"=\""+resourceString+"\"") - s.AppendResponse(resp) - - auth := NewBearerAuthorizerCallback(s, func(tenantID, resource string) (*BearerAuthorizer, error) { - if tenantID != tenantString { - t.Fatal("BearerAuthorizerCallback: bad tenant ID") - } - if resource != resourceString { - t.Fatal("BearerAuthorizerCallback: bad resource") - } - - oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, tenantID) - if err != nil { - t.Fatalf("azure: NewOAuthConfig returned an error (%v)", err) - } - - spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", resource) - if err != nil { - t.Fatalf("azure: NewServicePrincipalToken returned an error (%v)", err) - } - - spt.SetSender(s) - return NewBearerAuthorizer(spt), nil - }) - - _, err := Prepare(mocks.NewRequest(), auth.WithAuthorization()) - if err == nil { - t.Fatal("azure: BearerAuthorizerCallback#WithAuthorization failed to return an error when refresh fails") - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index f86b66a410..aafdf021fd 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -72,6 +72,7 @@ package autorest // limitations under the License. import ( + "context" "net/http" "time" ) @@ -130,3 +131,20 @@ func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Reque return req, nil } + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. 
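// For context (editorial aside, not a line of this patch): this is the
// context-aware successor to NewPollingRequest above; cancellation now flows
// through ctx via WithContext rather than the legacy cancel channel parameter.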
+func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go b/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go deleted file mode 100644 index 467ea438b2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/http" - "testing" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -func TestResponseHasStatusCode(t *testing.T) { - codes := []int{http.StatusOK, http.StatusAccepted} - resp := &http.Response{StatusCode: http.StatusAccepted} - if !ResponseHasStatusCode(resp, codes...) { - t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) - } -} - -func TestResponseHasStatusCodeNotPresent(t *testing.T) { - codes := []int{http.StatusOK, http.StatusAccepted} - resp := &http.Response{StatusCode: http.StatusInternalServerError} - if ResponseHasStatusCode(resp, codes...) 
{ - t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes) - } -} - -func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) { - resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) - - req, _ := NewPollingRequest(resp, nil) - if req != nil { - t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing") - } -} - -func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) - - _, err := NewPollingRequest(resp, nil) - if err == nil { - t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails") - } -} - -func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) - - req, _ := NewPollingRequest(resp, nil) - if req != nil { - t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed") - } -} - -func TestNewPollingRequestReturnsAGetRequest(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, nil) - if req.Method != "GET" { - t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) - } -} - -func TestNewPollingRequestProvidesTheURL(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, nil) - if req.URL.String() != mocks.TestURL { - t.Fatalf("autorest: NewPollingRequest did not create an HTTP with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL) - } -} - -func TestGetLocation(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - l := GetLocation(resp) - if len(l) == 0 { - t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l) - } -} - -func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - l := GetLocation(resp) - if len(l) != 0 { - t.Fatalf("autorest: GetLocation return a value without a Location header -- received %v", l) - } -} - -func TestGetRetryAfter(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - d := GetRetryAfter(resp, DefaultPollingDelay) - if d != mocks.TestDelay { - t.Fatalf("autorest: GetRetryAfter failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d) - } -} - -func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - d := GetRetryAfter(resp, DefaultPollingDelay) - if d != DefaultPollingDelay { - t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a missing Retry-After header -- expected %v, received %v", - DefaultPollingDelay, d) - } -} - -func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) { - resp := 
mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value") - - d := GetRetryAfter(resp, DefaultPollingDelay) - if d != DefaultPollingDelay { - t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v", - DefaultPollingDelay, d) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index c8f495d853..0041eacf75 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -21,11 +21,12 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" "strings" "time" "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/tracing" ) const ( @@ -39,63 +40,106 @@ const ( operationSucceeded string = "Succeeded" ) -var pollingCodes = [...]int{http.StatusAccepted, http.StatusCreated, http.StatusOK} +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} // Future provides a mechanism to access the status and results of an asynchronous request. // Since futures are stateful they should be passed by value to avoid race conditions. type Future struct { - req *http.Request - resp *http.Response - ps pollingState + req *http.Request // legacy + pt pollingTracker } // NewFuture returns a new Future object initialized with the specified request. +// Deprecated: Please use NewFutureFromResponse instead. func NewFuture(req *http.Request) Future { return Future{req: req} } -// Response returns the last HTTP response or nil if there isn't one. +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. func (f Future) Response() *http.Response { - return f.resp + if f.pt == nil { + return nil + } + return f.pt.latestResponse() } // Status returns the last status message of the operation. func (f Future) Status() string { - if f.ps.State == "" { - return "Unknown" + if f.pt == nil { + return "" } - return f.ps.State + return f.pt.pollingStatus() } // PollingMethod returns the method used to monitor the status of the asynchronous operation. func (f Future) PollingMethod() PollingMethodType { - return f.ps.PollingMethod + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() } // Done queries the service to see if the operation has completed. +// Deprecated: Use DoneWithContext() func (f *Future) Done(sender autorest.Sender) (bool, error) { - // exit early if this future has terminated - if f.ps.hasTerminated() { - return true, f.errorInfo() - } + return f.DoneWithContext(context.Background(), sender) +} - resp, err := sender.Do(f.req) - f.resp = resp - if err != nil || !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { +// DoneWithContext queries the service to see if the operation has completed. 
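// For context (editorial aside, not a line of this patch): on first call
// DoneWithContext migrates a legacy request-based Future (one built with the
// deprecated NewFuture) onto a pollingTracker, then polls, checks for errors,
// and updates the tracker state; the pollingTracker interface is defined below.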
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + // support for legacy Future implementation + if f.req != nil { + resp, err := sender.Do(f.req) + if err != nil { + return false, err + } + pt, err := createPollingTracker(resp) + if err != nil { + return false, err + } + f.pt = pt + f.req = nil + } + // end legacy + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { return false, err } - - err = updatePollingState(resp, &f.ps) - if err != nil { + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { return false, err } - - if f.ps.hasTerminated() { - return true, f.errorInfo() + if err := f.pt.initPollingMethod(); err != nil { + return false, err } - - f.req, err = newPollingRequest(f.ps) - return false, err + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() } // GetPollingDelay returns a duration the application should wait before checking @@ -103,11 +147,15 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) { // the service via the Retry-After response header. If the header wasn't returned // then the function returns the zero-value time.Duration and false. func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.resp == nil { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { return 0, false } - retry := f.resp.Header.Get(autorest.HeaderRetryAfter) + retry := resp.Header.Get(autorest.HeaderRetryAfter) if retry == "" { return 0, false } @@ -124,14 +172,42 @@ func (f Future) GetPollingDelay() (time.Duration, bool) { // running operation has completed, the provided context is cancelled, or the client's // polling duration has been exceeded. It will retry failed polling attempts based on // the retry value defined in the client up to the maximum retry attempts. +// Deprecated: Please use WaitForCompletionRef() instead. func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) - defer cancel() + return f.WaitForCompletionRef(ctx, client) +} - done, err := f.Done(client) - for attempts := 0; !done; done, err = f.Done(client) { +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded") + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") } // we want delayAttempt to be zero in the non-error case so // that DelayForBackoff doesn't perform exponential back-off @@ -153,313 +229,764 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e attempts++ } // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) if !delayElapsed { - return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled") + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") } } - return err -} - -// if the operation failed the polling state will contain -// error information and implements the error interface -func (f *Future) errorInfo() error { - if !f.ps.hasSucceeded() { - return f.ps - } - return nil + return } // MarshalJSON implements the json.Marshaler interface. func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(&f.ps) + return json.Marshal(f.pt) } // UnmarshalJSON implements the json.Unmarshaler interface. func (f *Future) UnmarshalJSON(data []byte) error { - err := json.Unmarshal(data, &f.ps) + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) if err != nil { return err } - f.req, err = newPollingRequest(f.ps) - return err + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) } -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request.
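A sketch (not part of this patch) of how a caller drives the reworked Future in place of the deleted DoPollForAsynchronous decorator below; the wrapper name is hypothetical, and the client and initial response are assumed to come from an ARM long-running call:

    package lroexample

    import (
        "context"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
        "github.com/Azure/go-autorest/autorest/azure"
    )

    // waitForLRO blocks until the operation behind initial reaches a terminal
    // state, then retrieves the final payload.
    func waitForLRO(ctx context.Context, client autorest.Client, initial *http.Response) (*http.Response, error) {
        future, err := azure.NewFutureFromResponse(initial)
        if err != nil {
            return nil, err
        }
        // Polls via DoneWithContext; a deadline on ctx wins, otherwise a
        // non-zero client.PollingDuration supplies the default timeout.
        if err := future.WaitForCompletionRef(ctx, client); err != nil {
            return nil, err
        }
        // Final GET, or the last response when a 200 carried the payload directly.
        return future.GetResult(client)
    }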
-func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { - return resp, nil - } +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. +func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + return sender.Do(req) +} - ps := pollingState{} - for err == nil { - err = updatePollingState(resp, &ps) - if err != nil { - break - } - if ps.hasTerminated() { - if !ps.hasSucceeded() { - err = ps - } - break - } +type pollingTracker interface { + // these methods can differ per tracker - r, err = newPollingRequest(ps) - if err != nil { - return resp, err - } - r.Cancel = resp.Request.Cancel + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. 
+ initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string - delay = autorest.GetRetryAfter(resp, delay) - resp, err = autorest.SendWithSender(s, r, - autorest.AfterDelay(delay)) + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} - return resp, err - }) +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } } + return nil } -func getAsyncOperation(resp *http.Response) string { - return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil } -func hasSucceeded(state string) bool { - return state == operationSucceeded +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err } -func hasTerminated(state string) bool { - switch state { - case operationCanceled, operationFailed, operationSucceeded: - return true - default: - return false +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
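// For context (editorial aside, not a line of this patch): the wrapped shape
// unmarshaled below is the ARM error envelope {"error": {"code": "...",
// "message": "..."}}; the fallback handles bodies that are a bare ServiceError,
// and when neither yields a code the tracker synthesizes a ServiceError from
// the polling status and stashes the raw body in AdditionalInfo.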
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se } -func hasFailed(state string) bool { - return state == operationFailed +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil } -type provisioningTracker interface { - state() string - hasSucceeded() bool - hasTerminated() bool +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err } -type operationResource struct { - // Note: - // The specification states services should return the "id" field. However some return it as - // "operationId". 
- ID string `json:"id"` - OperationID string `json:"operationId"` - Name string `json:"name"` - Status string `json:"status"` - Properties map[string]interface{} `json:"properties"` - OperationError ServiceError `json:"error"` - StartTime date.Time `json:"startTime"` - EndTime date.Time `json:"endTime"` - PercentComplete float64 `json:"percentComplete"` +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm } -func (or operationResource) state() string { - return or.Status +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State } -func (or operationResource) hasSucceeded() bool { - return hasSucceeded(or.state()) +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI } -func (or operationResource) hasTerminated() bool { - return hasTerminated(or.state()) +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI } -type provisioningProperties struct { - ProvisioningState string `json:"provisioningState"` +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) } -type provisioningStatus struct { - Properties provisioningProperties `json:"properties,omitempty"` - ProvisioningError ServiceError `json:"error,omitempty"` +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) } -func (ps provisioningStatus) state() string { - return ps.Properties.ProvisioningState +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) } -func (ps provisioningStatus) hasSucceeded() bool { - return hasSucceeded(ps.state()) +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp } -func (ps provisioningStatus) hasTerminated() bool { - return hasTerminated(ps.state()) +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil } -func (ps provisioningStatus) hasProvisioningError() bool { - return ps.ProvisioningError != ServiceError{} +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil } -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string +// DELETE -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. 
- PollingAsyncOperation PollingMethodType = "AsyncOperation" +type pollingTrackerDelete struct { + pollingTrackerBase } - // PollingLocation indicates the polling method uses the Location header. - PollingLocation PollingMethodType = "Location" +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() } -type pollingState struct { - PollingMethod PollingMethodType `json:"pollingMethod"` - URI string `json:"uri"` - State string `json:"state"` - Code string `json:"code"` - Message string `json:"message"` +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent } -func (ps pollingState) hasSucceeded() bool { - return hasSucceeded(ps.State) +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase } -func (ps pollingState) hasTerminated() bool { - return hasTerminated(ps.State) +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch",
"updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil } -func (ps pollingState) hasFailed() bool { - return hasFailed(ps.State) +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() } -func (ps pollingState) Error() string { - return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.State, ps.Code, ps.Message) +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated } -// updatePollingState maps the operation status -- retrieved from either a provisioningState -// field, the status field of an OperationResource, or inferred from the HTTP status code -- -// into a well-known states. Since the process begins from the initial request, the state -// always comes from either a the provisioningState returned or is inferred from the HTTP -// status code. Subsequent requests will read an Azure OperationResource object if the -// service initially returned the Azure-AsyncOperation header. The responseFormat field notes -// the expected response format. -func updatePollingState(resp *http.Response, ps *pollingState) error { - // Determine the response shape - // -- The first response will always be a provisioningStatus response; only the polling requests, - // depending on the header returned, may be something otherwise. - var pt provisioningTracker - if ps.PollingMethod == PollingAsyncOperation { - pt = &operationResource{} - } else { - pt = &provisioningStatus{} - } +// POST - // If this is the first request (that is, the polling response shape is unknown), determine how - // to poll and what to expect - if ps.PollingMethod == PollingUnknown { - req := resp.Request - if req == nil { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") - } +type pollingTrackerPost struct { + pollingTrackerBase +} - // Prefer the Azure-AsyncOperation header - ps.URI = getAsyncOperation(resp) - if ps.URI != "" { - ps.PollingMethod = PollingAsyncOperation +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") } else { - ps.PollingMethod = PollingLocation + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation } - - // Else, use the Location header - if ps.URI == "" { - ps.URI = autorest.GetLocation(resp) + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} - // Lastly, requests against an existing resource, use the last request URI - if ps.URI == "" { - m := strings.ToUpper(req.Method) - if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { - ps.URI = req.URL.String() +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation } } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } } + return nil +} - // Read and interpret the response (saving the Body in case no polling is necessary) - b := &bytes.Buffer{} - err := autorest.Respond(resp, - autorest.ByCopying(b), - autorest.ByUnmarshallingJSON(pt), - autorest.ByClosing()) - resp.Body = ioutil.NopCloser(b) +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() if err != nil { return err } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} - // Interpret the results - // -- Terminal states apply regardless - // -- Unknown states are per-service inprogress states - // -- Otherwise, infer state from HTTP status code - if pt.hasTerminated() { - ps.State = pt.state() - } else if pt.state() != "" { - ps.State = operationInProgress - } else { - switch resp.StatusCode { - case http.StatusAccepted: - ps.State = operationInProgress - - case http.StatusNoContent, http.StatusCreated, http.StatusOK: - ps.State = operationSucceeded +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} - default: - ps.State = operationFailed - } +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values; we do this during creation in case the + // initial response sends us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} - if ps.State == operationInProgress && ps.URI == "" { - return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} - // For failed operation, check for error code and message in - // -- Operation resource - // -- Response - // -- Otherwise, Unknown - if ps.hasFailed() { - if ps.PollingMethod == PollingAsyncOperation { - or := pt.(*operationResource) - ps.Code = or.OperationError.Code - ps.Message = or.OperationError.Message - } else { - p := pt.(*provisioningStatus) - if p.hasProvisioningError() { - ps.Code = p.ProvisioningError.Code - ps.Message = p.ProvisioningError.Message - } else { - ps.Code = "Unknown" - ps.Message = "None" +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via +// the context associated with the http.Request. +// Deprecated: Prefer using Futures to allow for non-blocking async operations. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + return resp, err } - } + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) 
{ + return resp, nil + } + future, err := NewFutureFromResponse(resp) + if err != nil { + return resp, err + } + // retry until either the LRO completes or we receive an error + var done bool + for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { + // check for Retry-After delay, if not present use the specified polling delay + if pd, ok := future.GetPollingDelay(); ok { + delay = pd + } + // wait until the delay elapses or the context is cancelled + if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { + return future.Response(), + autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") + } + } + return future.Response(), err + }) } - return nil } -func newPollingRequest(ps pollingState) (*http.Request, error) { - reqPoll, err := autorest.Prepare(&http.Request{}, - autorest.AsGet(), - autorest.WithBaseURL(ps.URI)) - if err != nil { - return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI) - } +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string - return reqPoll, nil +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of an azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go deleted file mode 100644 index 5757e25c22..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go +++ /dev/null @@ -1,1327 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/mocks" -) - -func TestGetAsyncOperation_ReturnsAzureAsyncOperationHeader(t *testing.T) { - r := newAsynchronousResponse() - - if getAsyncOperation(r) != mocks.TestAzureAsyncURL { - t.Fatalf("azure: getAsyncOperation failed to extract the Azure-AsyncOperation header -- expected %v, received %v", mocks.TestURL, getAsyncOperation(r)) - } -} - -func TestGetAsyncOperation_ReturnsEmptyStringIfHeaderIsAbsent(t *testing.T) { - r := mocks.NewResponse() - - if len(getAsyncOperation(r)) != 0 { - t.Fatalf("azure: getAsyncOperation failed to return empty string when the Azure-AsyncOperation header is absent -- received %v", getAsyncOperation(r)) - } -} - -func TestHasSucceeded_ReturnsTrueForSuccess(t *testing.T) { - if !hasSucceeded(operationSucceeded) { - t.Fatal("azure: hasSucceeded failed to return true for success") - } -} - -func TestHasSucceeded_ReturnsFalseOtherwise(t *testing.T) { - if hasSucceeded("not a success string") { - t.Fatal("azure: hasSucceeded returned true for a non-success") - } -} - -func TestHasTerminated_ReturnsTrueForValidTerminationStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - if !hasTerminated(state) { - t.Fatalf("azure: hasTerminated failed to return true for the '%s' state", state) - } - } -} - -func TestHasTerminated_ReturnsFalseForUnknownStates(t *testing.T) { - if hasTerminated("not a known state") { - t.Fatal("azure: hasTerminated returned true for an unknown state") - } -} - -func TestOperationError_ErrorReturnsAString(t *testing.T) { - s := (ServiceError{Code: "server code", Message: "server error"}).Error() - if s == "" { - t.Fatalf("azure: operationError#Error failed to return an error") - } - if !strings.Contains(s, "server code") || !strings.Contains(s, "server error") { - t.Fatalf("azure: operationError#Error returned a malformed error -- error='%v'", s) - } -} - -func TestOperationResource_StateReturnsState(t *testing.T) { - if (operationResource{Status: "state"}).state() != "state" { - t.Fatalf("azure: operationResource#state failed to return the correct state") - } -} - -func TestOperationResource_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { - if (operationResource{Status: "not a success string"}).hasSucceeded() { - t.Fatalf("azure: operationResource#hasSucceeded failed to return false for a canceled operation") - } -} - -func TestOperationResource_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { - if !(operationResource{Status: operationSucceeded}).hasSucceeded() { - t.Fatalf("azure: operationResource#hasSucceeded failed to return true for a successful operation") - } -} - -func TestOperationResource_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - if !(operationResource{Status: state}).hasTerminated() { - t.Fatalf("azure: operationResource#hasTerminated failed to return true for the '%s' state", state) - } - } -} - -func TestOperationResource_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { - if (operationResource{Status: "not a known state"}).hasTerminated() { - t.Fatalf("azure: operationResource#hasTerminated returned true for a non-terminal operation") - } -} - -func TestProvisioningStatus_StateReturnsState(t *testing.T) { - if 
(provisioningStatus{Properties: provisioningProperties{"state"}}).state() != "state" { - t.Fatalf("azure: provisioningStatus#state failed to return the correct state") - } -} - -func TestProvisioningStatus_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { - if (provisioningStatus{Properties: provisioningProperties{"not a success string"}}).hasSucceeded() { - t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return false for a canceled operation") - } -} - -func TestProvisioningStatus_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { - if !(provisioningStatus{Properties: provisioningProperties{operationSucceeded}}).hasSucceeded() { - t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return true for a successful operation") - } -} - -func TestProvisioningStatus_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - if !(provisioningStatus{Properties: provisioningProperties{state}}).hasTerminated() { - t.Fatalf("azure: provisioningStatus#hasTerminated failed to return true for the '%s' state", state) - } - } -} - -func TestProvisioningStatus_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { - if (provisioningStatus{Properties: provisioningProperties{"not a known state"}}).hasTerminated() { - t.Fatalf("azure: provisioningStatus#hasTerminated returned true for a non-terminal operation") - } -} - -func TestPollingState_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { - if (pollingState{State: "not a success string"}).hasSucceeded() { - t.Fatalf("azure: pollingState#hasSucceeded failed to return false for a canceled operation") - } -} - -func TestPollingState_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { - if !(pollingState{State: operationSucceeded}).hasSucceeded() { - t.Fatalf("azure: pollingState#hasSucceeded failed to return true for a successful operation") - } -} - -func TestPollingState_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - if !(pollingState{State: state}).hasTerminated() { - t.Fatalf("azure: pollingState#hasTerminated failed to return true for the '%s' state", state) - } - } -} - -func TestPollingState_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { - if (pollingState{State: "not a known state"}).hasTerminated() { - t.Fatalf("azure: pollingState#hasTerminated returned true for a non-terminal operation") - } -} - -func TestUpdatePollingState_ReturnsAnErrorIfOneOccurs(t *testing.T) { - resp := mocks.NewResponseWithContent(operationResourceIllegal) - err := updatePollingState(resp, &pollingState{}) - if err == nil { - t.Fatalf("azure: updatePollingState failed to return an error after a JSON parsing error") - } -} - -func TestUpdatePollingState_ReturnsTerminatedForKnownProvisioningStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, state)) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if !ps.hasTerminated() { - t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) - } - } -} - -func TestUpdatePollingState_ReturnsSuccessForSuccessfulProvisioningState(t *testing.T) { - resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, operationSucceeded)) - resp.StatusCode = 42 - ps := 
&pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if !ps.hasSucceeded() { - t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) - } -} - -func TestUpdatePollingState_ReturnsInProgressForAllOtherProvisioningStates(t *testing.T) { - s := "not a recognized state" - resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, s)) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if ps.hasTerminated() { - t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) - } -} - -func TestUpdatePollingState_ReturnsSuccessWhenProvisioningStateFieldIsAbsentForSuccessStatusCodes(t *testing.T) { - for _, sc := range []int{http.StatusOK, http.StatusCreated, http.StatusNoContent} { - resp := mocks.NewResponseWithContent(pollingStateEmpty) - resp.StatusCode = sc - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if !ps.hasSucceeded() { - t.Fatalf("azure: updatePollingState failed to return success when the provisionState field is absent for Status Code %d", sc) - } - } -} - -func TestUpdatePollingState_ReturnsInProgressWhenProvisioningStateFieldIsAbsentForAccepted(t *testing.T) { - resp := mocks.NewResponseWithContent(pollingStateEmpty) - resp.StatusCode = http.StatusAccepted - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if ps.hasTerminated() { - t.Fatalf("azure: updatePollingState returned terminated when the provisionState field is absent for Status Code Accepted") - } -} - -func TestUpdatePollingState_ReturnsFailedWhenProvisioningStateFieldIsAbsentForUnknownStatusCodes(t *testing.T) { - resp := mocks.NewResponseWithContent(pollingStateEmpty) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if !ps.hasTerminated() || ps.hasSucceeded() { - t.Fatalf("azure: updatePollingState did not return failed when the provisionState field is absent for an unknown Status Code") - } -} - -func TestUpdatePollingState_ReturnsTerminatedForKnownOperationResourceStates(t *testing.T) { - for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { - resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, state)) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingAsyncOperation} - updatePollingState(resp, ps) - if !ps.hasTerminated() { - t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) - } - } -} - -func TestUpdatePollingState_ReturnsSuccessForSuccessfulOperationResourceState(t *testing.T) { - resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, operationSucceeded)) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingAsyncOperation} - updatePollingState(resp, ps) - if !ps.hasSucceeded() { - t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) - } -} - -func TestUpdatePollingState_ReturnsInProgressForAllOtherOperationResourceStates(t *testing.T) { - s := "not a recognized state" - resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, s)) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingAsyncOperation} - updatePollingState(resp, ps) - if ps.hasTerminated() { - t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) - } -} - 
-func TestUpdatePollingState_CopiesTheResponseBody(t *testing.T) { - s := fmt.Sprintf(pollingStateFormat, operationSucceeded) - resp := mocks.NewResponseWithContent(s) - resp.StatusCode = 42 - ps := &pollingState{PollingMethod: PollingAsyncOperation} - updatePollingState(resp, ps) - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("azure: updatePollingState failed to replace the http.Response Body -- Error='%v'", err) - } - if string(b) != s { - t.Fatalf("azure: updatePollingState failed to copy the http.Response Body -- Expected='%s' Received='%s'", s, string(b)) - } -} - -func TestUpdatePollingState_ClosesTheOriginalResponseBody(t *testing.T) { - resp := mocks.NewResponse() - b := resp.Body.(*mocks.Body) - ps := &pollingState{PollingMethod: PollingLocation} - updatePollingState(resp, ps) - if b.IsOpen() { - t.Fatal("azure: updatePollingState failed to close the original http.Response Body") - } -} - -func TestUpdatePollingState_FailsWhenResponseLacksRequest(t *testing.T) { - resp := newAsynchronousResponse() - resp.Request = nil - - ps := pollingState{} - err := updatePollingState(resp, &ps) - if err == nil { - t.Fatal("azure: updatePollingState failed to return an error when the http.Response lacked the original http.Request") - } -} - -func TestUpdatePollingState_SetsThePollingMethodWhenUsingTheAzureAsyncOperationHeader(t *testing.T) { - ps := pollingState{} - updatePollingState(newAsynchronousResponse(), &ps) - - if ps.PollingMethod != PollingAsyncOperation { - t.Fatal("azure: updatePollingState failed to set the correct response format when using the Azure-AsyncOperation header") - } -} - -func TestUpdatePollingState_SetsThePollingMethodWhenUsingTheAzureAsyncOperationHeaderIsMissing(t *testing.T) { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - ps := pollingState{} - updatePollingState(resp, &ps) - - if ps.PollingMethod != PollingLocation { - t.Fatal("azure: updatePollingState failed to set the correct response format when the Azure-AsyncOperation header is absent") - } -} - -func TestUpdatePollingState_DoesNotChangeAnExistingReponseFormat(t *testing.T) { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - ps := pollingState{PollingMethod: PollingAsyncOperation} - updatePollingState(resp, &ps) - - if ps.PollingMethod != PollingAsyncOperation { - t.Fatal("azure: updatePollingState failed to leave an existing response format setting") - } -} - -func TestUpdatePollingState_PrefersTheAzureAsyncOperationHeader(t *testing.T) { - resp := newAsynchronousResponse() - - ps := pollingState{} - updatePollingState(resp, &ps) - - if ps.URI != mocks.TestAzureAsyncURL { - t.Fatal("azure: updatePollingState failed to prefer the Azure-AsyncOperation header") - } -} - -func TestUpdatePollingState_PrefersLocationWhenTheAzureAsyncOperationHeaderMissing(t *testing.T) { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - ps := pollingState{} - updatePollingState(resp, &ps) - - if ps.URI != mocks.TestLocationURL { - t.Fatal("azure: updatePollingState failed to prefer the Location header when the Azure-AsyncOperation header is missing") - } -} - -func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *testing.T) { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - resp.Request.Method = 
http.MethodPatch - - ps := pollingState{} - updatePollingState(resp, &ps) - - if ps.URI != mocks.TestURL { - t.Fatal("azure: updatePollingState failed to use the Object URL when the asynchronous headers are missing") - } -} - -func TestUpdatePollingState_RecognizesLowerCaseHTTPVerbs(t *testing.T) { - for _, m := range []string{strings.ToLower(http.MethodPatch), strings.ToLower(http.MethodPut), strings.ToLower(http.MethodGet)} { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - resp.Request.Method = m - - ps := pollingState{} - updatePollingState(resp, &ps) - - if ps.URI != mocks.TestURL { - t.Fatalf("azure: updatePollingState failed to recognize the lower-case HTTP verb '%s'", m) - } - } -} - -func TestUpdatePollingState_ReturnsAnErrorIfAsyncHeadersAreMissingForANewOrDeletedObject(t *testing.T) { - resp := newAsynchronousResponse() - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - - for _, m := range []string{http.MethodDelete, http.MethodPost} { - resp.Request.Method = m - err := updatePollingState(resp, &pollingState{}) - if err == nil { - t.Fatalf("azure: updatePollingState failed to return an error even though it could not determine the polling URL for Method '%s'", m) - } - } -} - -func TestNewPollingRequest_ReturnsAnErrorWhenPrepareFails(t *testing.T) { - _, err := newPollingRequest(pollingState{PollingMethod: PollingAsyncOperation, URI: mocks.TestBadURL}) - if err == nil { - t.Fatal("azure: newPollingRequest failed to return an error when Prepare fails") - } -} - -func TestNewPollingRequest_DoesNotReturnARequestWhenPrepareFails(t *testing.T) { - req, _ := newPollingRequest(pollingState{PollingMethod: PollingAsyncOperation, URI: mocks.TestBadURL}) - if req != nil { - t.Fatal("azure: newPollingRequest returned an http.Request when Prepare failed") - } -} - -func TestNewPollingRequest_ReturnsAGetRequest(t *testing.T) { - req, _ := newPollingRequest(pollingState{PollingMethod: PollingAsyncOperation, URI: mocks.TestAzureAsyncURL}) - if req.Method != "GET" { - t.Fatalf("azure: newPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) - } -} - -func TestDoPollForAsynchronous_IgnoresUnspecifiedStatusCodes(t *testing.T) { - client := mocks.NewSender() - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Duration(0))) - - if client.Attempts() != 1 { - t.Fatalf("azure: DoPollForAsynchronous polled for unspecified status code") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsForSpecifiedStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAsynchronousResponse()) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() != 2 { - t.Fatalf("azure: DoPollForAsynchronous failed to poll for specified status code") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_CanBeCanceled(t *testing.T) { - cancel := make(chan struct{}) - delay := 5 * time.Second - - r1 := newAsynchronousResponse() - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(newOperationResourceResponse("Busy"), -1) - - var wg sync.WaitGroup - wg.Add(1) - start := time.Now() - go func() { - req := mocks.NewRequest() - req.Cancel = cancel - - 
wg.Done() - - r, _ := autorest.SendWithSender(client, req, - DoPollForAsynchronous(10*time.Second)) - autorest.Respond(r, - autorest.ByClosing()) - }() - wg.Wait() - close(cancel) - time.Sleep(5 * time.Millisecond) - if time.Since(start) >= delay { - t.Fatalf("azure: DoPollForAsynchronous failed to cancel") - } -} - -func TestDoPollForAsynchronous_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { - r1 := newAsynchronousResponse() - b1 := r1.Body.(*mocks.Body) - r2 := newOperationResourceResponse("busy") - b2 := r2.Body.(*mocks.Body) - r3 := newOperationResourceResponse(operationSucceeded) - b3 := r3.Body.(*mocks.Body) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendResponse(r3) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if b1.IsOpen() || b2.IsOpen() || b3.IsOpen() { - t.Fatalf("azure: DoPollForAsynchronous did not close unreturned response bodies") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_LeavesLastResponseBodyOpen(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationSucceeded) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendResponse(r3) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - b, err := ioutil.ReadAll(r.Body) - if len(b) <= 0 || err != nil { - t.Fatalf("azure: DoPollForAsynchronous did not leave open the body of the last response - Error='%v'", err) - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_DoesNotPollIfOriginalRequestReturnedAnError(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendResponse(r2) - client.SetError(fmt.Errorf("Faux Error")) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() != 1 { - t.Fatalf("azure: DoPollForAsynchronous tried to poll after receiving an error") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_DoesNotPollIfCreatingOperationRequestFails(t *testing.T) { - r1 := newAsynchronousResponse() - mocks.SetResponseHeader(r1, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestBadURL) - r2 := newOperationResourceResponse("busy") - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() > 1 { - t.Fatalf("azure: DoPollForAsynchronous polled with an invalidly formed operation request") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_StopsPollingAfterAnError(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.SetError(fmt.Errorf("Faux Error")) - client.SetEmitErrorAfter(2) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() > 3 { - t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an error") - } - - 
autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsPollingError(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(newAsynchronousResponse(), 5) - client.SetError(fmt.Errorf("Faux Error")) - client.SetEmitErrorAfter(1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if err == nil { - t.Fatalf("azure: DoPollForAsynchronous failed to return error from polling") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsForStatusAccepted(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Status = "202 Accepted" - r1.StatusCode = http.StatusAccepted - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationCanceled) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsForStatusCreated(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Status = "201 Created" - r1.StatusCode = http.StatusCreated - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationCanceled) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsUntilProvisioningStatusTerminates(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newProvisioningStatusResponse(operationCanceled) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsUntilProvisioningStatusSucceeds(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newProvisioningStatusResponse(operationSucceeded) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - 
if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsUntilOperationResourceHasTerminated(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationCanceled) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_PollsUntilOperationResourceHasSucceeded(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationSucceeded) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_StopsPollingWhenOperationResourceHasTerminated(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse(operationCanceled) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 2) - - r, _ := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() > 4 { - t.Fatalf("azure: DoPollForAsynchronous failed to stop after receiving a terminated OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsAnErrorForCanceledOperations(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceErrorResponse(operationCanceled) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Canceled") { - t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsAnErrorForFailedOperations(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceErrorResponse(operationFailed) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Failed") { - t.Fatalf("azure: 
DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_WithNilURI(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r1.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - - r2 := newOperationResourceResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendResponse(r2) - - req, _ := http.NewRequest("POST", "https://microsoft.com/a/b/c/", mocks.NewBody("")) - r, err := autorest.SendWithSender(client, req, - DoPollForAsynchronous(time.Millisecond)) - - if err == nil { - t.Fatalf("azure: DoPollForAsynchronous failed to return error for nil URI. got: nil; want: Azure Polling Error - Unable to obtain polling URI for POST") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsAnUnknownErrorForFailedOperations(t *testing.T) { - // Return unknown error if error not present in last response - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newProvisioningStatusResponse(operationFailed) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - expected := makeLongRunningOperationErrorString("Unknown", "None") - if err.Error() != expected { - t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. \n expected=%q \n got=%q", - expected, err.Error()) - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsErrorForLastErrorResponse(t *testing.T) { - // Return error code and message if error present in last response - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newAsynchronousResponseWithError("400 Bad Request", http.StatusBadRequest) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - expected := makeLongRunningOperationErrorString("InvalidParameter", "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.") - if err.Error() != expected { - t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. 
\n expected=%q \n got=%q", - expected, err.Error()) - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsOperationResourceErrorForFailedOperations(t *testing.T) { - // Return Operation resource response with error code and message in last operation resource response - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceErrorResponse(operationFailed) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - expected := makeLongRunningOperationErrorString("BadArgument", "The provided database 'foo' has an invalid username.") - if err.Error() != expected { - t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. \n expected=%q \n got=%q", - expected, err.Error()) - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_ReturnsErrorForFirstPutRequest(t *testing.T) { - // Return 400 bad response with error code and message in first put - r1 := newAsynchronousResponseWithError("400 Bad Request", http.StatusBadRequest) - client := mocks.NewSender() - client.AppendResponse(r1) - - res, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - if err != nil { - t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. \n expected=%q \n got=%q", - errorResponse, err.Error()) - } - - err = autorest.Respond(res, - WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusCreated, http.StatusOK), - autorest.ByClosing()) - - reqError, ok := err.(*RequestError) - if !ok { - t.Fatalf("azure: returned error is not azure.RequestError: %T", err) - } - - expected := &RequestError{ - ServiceError: &ServiceError{ - Code: "InvalidParameter", - Message: "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.", - }, - DetailedError: autorest.DetailedError{ - StatusCode: 400, - }, - } - if !reflect.DeepEqual(reqError, expected) { - t.Fatalf("azure: wrong error. expected=%q\ngot=%q", expected, reqError) - } - - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(b) != errorResponse { - t.Fatalf("azure: Response body is wrong. 
got=%q expected=%q", string(b), errorResponse) - } - -} - -func TestDoPollForAsynchronous_ReturnsNoErrorForSuccessfulOperations(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceErrorResponse(operationSucceeded) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if err != nil { - t.Fatalf("azure: DoPollForAsynchronous returned an error for a successful OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestDoPollForAsynchronous_StopsPollingIfItReceivesAnInvalidOperationResource(t *testing.T) { - r1 := newAsynchronousResponse() - r2 := newOperationResourceResponse("busy") - r3 := newOperationResourceResponse("busy") - r3.Body = mocks.NewBody(operationResourceIllegal) - r4 := newOperationResourceResponse(operationSucceeded) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendAndRepeatResponse(r3, 1) - client.AppendAndRepeatResponse(r4, 1) - - r, err := autorest.SendWithSender(client, mocks.NewRequest(), - DoPollForAsynchronous(time.Millisecond)) - - if client.Attempts() > 4 { - t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an invalid OperationResource") - } - if err == nil { - t.Fatalf("azure: DoPollForAsynchronous failed to return an error after receving an invalid OperationResource") - } - - autorest.Respond(r, - autorest.ByClosing()) -} - -func TestFuture_PollsUntilProvisioningStatusSucceeds(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newProvisioningStatusResponse(operationSucceeded) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - client := mocks.NewSender() - client.AppendResponse(r1) - client.AppendAndRepeatResponse(r2, 2) - client.AppendResponse(r3) - - future := NewFuture(mocks.NewRequest()) - - for done, err := future.Done(client); !done; done, err = future.Done(client) { - if future.PollingMethod() != PollingLocation { - t.Fatalf("azure: wrong future polling method") - } - if err != nil { - t.Fatalf("azure: TestFuture polling Done failed") - } - delay, ok := future.GetPollingDelay() - if !ok { - t.Fatalf("expected Retry-After value") - } - time.Sleep(delay) - } - - if client.Attempts() < client.NumResponses() { - t.Fatalf("azure: TestFuture stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(future.Response(), - autorest.ByClosing()) -} - -func TestFuture_Marshalling(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAsynchronousResponse()) - - future := NewFuture(mocks.NewRequest()) - done, err := future.Done(client) - if err != nil { - t.Fatalf("azure: TestFuture marshalling Done failed") - } - if done { - t.Fatalf("azure: TestFuture marshalling shouldn't be done") - } - if future.PollingMethod() != PollingAsyncOperation { - t.Fatalf("azure: wrong future polling method") - } - - data, err := json.Marshal(future) - if err != nil { - t.Fatalf("azure: TestFuture failed to marshal") - } - - var future2 Future - err = json.Unmarshal(data, &future2) - if err != nil { - t.Fatalf("azure: TestFuture failed to unmarshal") - } - - if 
future.ps.Code != future2.ps.Code { - t.Fatalf("azure: TestFuture marshalling codes don't match") - } - if future.ps.Message != future2.ps.Message { - t.Fatalf("azure: TestFuture marshalling messages don't match") - } - if future.ps.PollingMethod != future2.ps.PollingMethod { - t.Fatalf("azure: TestFuture marshalling response formats don't match") - } - if future.ps.State != future2.ps.State { - t.Fatalf("azure: TestFuture marshalling states don't match") - } - if future.ps.URI != future2.ps.URI { - t.Fatalf("azure: TestFuture marshalling URIs don't match") - } -} - -func TestFuture_WaitForCompletion(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3 := newAsynchronousResponseWithError("Internal server error", http.StatusInternalServerError) - r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r3.Header.Del(http.CanonicalHeaderKey("Retry-After")) - r4 := newProvisioningStatusResponse(operationSucceeded) - r4.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - sender := mocks.NewSender() - sender.AppendResponse(r1) - sender.AppendError(errors.New("transient network failure")) - sender.AppendAndRepeatResponse(r2, 2) - sender.AppendResponse(r3) - sender.AppendResponse(r4) - - future := NewFuture(mocks.NewRequest()) - - client := autorest.Client{ - PollingDelay: 1 * time.Second, - PollingDuration: autorest.DefaultPollingDuration, - RetryAttempts: autorest.DefaultRetryAttempts, - RetryDuration: 1 * time.Second, - Sender: sender, - } - - err := future.WaitForCompletion(context.Background(), client) - if err != nil { - t.Fatalf("azure: WaitForCompletion returned non-nil error") - } - - if sender.Attempts() < sender.NumResponses() { - t.Fatalf("azure: TestFuture stopped polling before receiving a terminated OperationResource") - } - - autorest.Respond(future.Response(), - autorest.ByClosing()) -} - -func TestFuture_WaitForCompletionTimedOut(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - sender := mocks.NewSender() - sender.AppendResponse(r1) - sender.AppendAndRepeatResponseWithDelay(r2, 1*time.Second, 5) - - future := NewFuture(mocks.NewRequest()) - - client := autorest.Client{ - PollingDelay: autorest.DefaultPollingDelay, - PollingDuration: 2 * time.Second, - RetryAttempts: autorest.DefaultRetryAttempts, - RetryDuration: 1 * time.Second, - Sender: sender, - } - - err := future.WaitForCompletion(context.Background(), client) - if err == nil { - t.Fatalf("azure: WaitForCompletion returned nil error, should have timed out") - } -} - -func TestFuture_WaitForCompletionRetriesExceeded(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - sender := mocks.NewSender() - sender.AppendResponse(r1) - sender.AppendAndRepeatError(errors.New("transient network failure"), autorest.DefaultRetryAttempts+1) - - future := NewFuture(mocks.NewRequest()) - - client := autorest.Client{ - PollingDelay: autorest.DefaultPollingDelay, - PollingDuration: autorest.DefaultPollingDuration, - RetryAttempts: autorest.DefaultRetryAttempts, - RetryDuration: 100 * time.Millisecond, - Sender: sender, - } - - err := future.WaitForCompletion(context.Background(), client) - if err == nil { - 
t.Fatalf("azure: WaitForCompletion returned nil error, should have errored out") - } -} - -func TestFuture_WaitForCompletionCancelled(t *testing.T) { - r1 := newAsynchronousResponse() - r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - r2 := newProvisioningStatusResponse("busy") - r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - - sender := mocks.NewSender() - sender.AppendResponse(r1) - sender.AppendAndRepeatResponseWithDelay(r2, 1*time.Second, 5) - - future := NewFuture(mocks.NewRequest()) - - client := autorest.Client{ - PollingDelay: autorest.DefaultPollingDelay, - PollingDuration: autorest.DefaultPollingDuration, - RetryAttempts: autorest.DefaultRetryAttempts, - RetryDuration: autorest.DefaultRetryDuration, - Sender: sender, - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(2 * time.Second) - cancel() - }() - - err := future.WaitForCompletion(ctx, client) - if err == nil { - t.Fatalf("azure: WaitForCompletion returned nil error, should have been cancelled") - } -} - -const ( - operationResourceIllegal = ` - This is not JSON and should fail...badly. - ` - pollingStateFormat = ` - { - "unused" : { - "somefield" : 42 - }, - "properties" : { - "provisioningState": "%s" - } - } - ` - - errorResponse = ` - { - "error" : { - "code" : "InvalidParameter", - "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." - } - } - ` - - pollingStateEmpty = ` - { - "unused" : { - "somefield" : 42 - }, - "properties" : { - } - } - ` - - operationResourceFormat = ` - { - "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", - "name": "sameguid", - "status" : "%s", - "startTime" : "2006-01-02T15:04:05Z", - "endTime" : "2006-01-02T16:04:05Z", - "percentComplete" : 50.00, - - "properties" : {} - } - ` - - operationResourceErrorFormat = ` - { - "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", - "name": "sameguid", - "status" : "%s", - "startTime" : "2006-01-02T15:04:05Z", - "endTime" : "2006-01-02T16:04:05Z", - "percentComplete" : 50.00, - - "properties" : {}, - "error" : { - "code" : "BadArgument", - "message" : "The provided database 'foo' has an invalid username." 
- } - } - ` -) - -func newAsynchronousResponse() *http.Response { - r := mocks.NewResponseWithStatus("201 Created", http.StatusCreated) - r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, operationInProgress)) - mocks.SetResponseHeader(r, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestAzureAsyncURL) - mocks.SetResponseHeader(r, http.CanonicalHeaderKey(autorest.HeaderLocation), mocks.TestLocationURL) - mocks.SetRetryHeader(r, retryDelay) - r.Request = mocks.NewRequestForURL(mocks.TestURL) - return r -} - -func newAsynchronousResponseWithError(response string, status int) *http.Response { - r := mocks.NewResponseWithStatus(response, status) - mocks.SetRetryHeader(r, retryDelay) - r.Request = mocks.NewRequestForURL(mocks.TestURL) - r.Body = mocks.NewBody(errorResponse) - return r -} - -func newOperationResourceResponse(status string) *http.Response { - r := newAsynchronousResponse() - r.Body = mocks.NewBody(fmt.Sprintf(operationResourceFormat, status)) - return r -} - -func newOperationResourceErrorResponse(status string) *http.Response { - r := newAsynchronousResponse() - r.Body = mocks.NewBody(fmt.Sprintf(operationResourceErrorFormat, status)) - return r -} - -func newProvisioningStatusResponse(status string) *http.Response { - r := newAsynchronousResponse() - r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, status)) - return r -} - -func makeLongRunningOperationErrorString(code string, message string) string { - return fmt.Sprintf("Long running operation terminated with status 'Failed': Code=%q Message=%q", code, message) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index fa18356476..3a0a439ff9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -1,8 +1,5 @@ -/* -Package azure provides Azure-specific implementations used with AutoRest. - -See the included examples for more detail. -*/ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. package azure // Copyright 2017 Microsoft Corporation @@ -24,7 +21,9 @@ import ( "fmt" "io/ioutil" "net/http" + "regexp" "strconv" + "strings" "github.com/Azure/go-autorest/autorest" ) @@ -43,21 +42,100 @@ const ( ) // ServiceError encapsulates the error response from an Azure service. +// It adheres to the OData v4 specification for error responses.
type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Details *[]interface{} `json:"details"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + if se.Details != nil { - d, err := json.Marshal(*(se.Details)) + d, err := json.Marshal(se.Details) if err != nil { - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + result += fmt.Sprintf(" Details=%v", se.Details) } - return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + result += fmt.Sprintf(" Details=%v", string(d)) + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // per the OData v4 spec the details field must be an array of JSON objects. + // unfortunately not all services adhere to the spec and just return a single + // object instead of an array with one object. so we have to perform some + // shenanigans to accommodate both cases. + // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceError1 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + type serviceError2 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + se1 := serviceError1{} + err := json.Unmarshal(b, &se1) + if err == nil { + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) + return nil } - return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + se2 := serviceError2{} + err = json.Unmarshal(b, &se2) + if err == nil { + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) + se.Details = append(se.Details, se2.Details) + return nil + } + return err +} + +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { + se.Code = code + se.Message = message + se.Target = target + se.Details = details + se.InnerError = inner + se.AdditionalInfo = additional } // RequestError describes an error response returned by Azure service.
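The two-pass decode in `UnmarshalJSON` above deserves a note: it first tries the OData v4 compliant shape, where `details` is an array of objects, and only then falls back to the single-object shape some services return, wrapping that object in a one-element slice so callers always see `[]map[string]interface{}`. Below is a minimal standalone sketch of the same technique; the `svcError` type and both payloads are invented for illustration and are not the vendored types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// svcError mirrors the normalized shape: Details is always a slice.
type svcError struct {
	Code    string                   `json:"code"`
	Message string                   `json:"message"`
	Details []map[string]interface{} `json:"details"`
}

// UnmarshalJSON tries the array form first, then falls back to the
// single-object form and wraps it in a one-element slice.
func (e *svcError) UnmarshalJSON(b []byte) error {
	type arrayForm svcError // a defined type drops the methods, avoiding recursion
	var a arrayForm
	if err := json.Unmarshal(b, &a); err == nil {
		*e = svcError(a)
		return nil
	}
	var o struct {
		Code    string                 `json:"code"`
		Message string                 `json:"message"`
		Details map[string]interface{} `json:"details"`
	}
	if err := json.Unmarshal(b, &o); err != nil {
		return err
	}
	*e = svcError{Code: o.Code, Message: o.Message, Details: []map[string]interface{}{o.Details}}
	return nil
}

func main() {
	// Spec-compliant payload (details as array) and legacy payload (single object).
	compliant := `{"code":"Conflict","message":"busy","details":[{"target":"Microsoft.Example"}]}`
	legacy := `{"code":"Conflict","message":"busy","details":{"target":"Microsoft.Example"}}`
	for _, payload := range []string{compliant, legacy} {
		var e svcError
		if err := json.Unmarshal([]byte(payload), &e); err != nil {
			fmt.Println("unmarshal failed:", err)
			continue
		}
		// Both print: code=Conflict details=[map[target:Microsoft.Example]]
		fmt.Printf("code=%s details=%v\n", e.Code, e.Details)
	}
}
```

The vendored implementation does the same with two local structs (`serviceError1`, `serviceError2`) and a shared `populate` helper, so both wire shapes normalize into a single `ServiceError`.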
@@ -83,6 +161,41 @@ func IsAzureError(e error) bool { return ok } +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a Resource struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + // NewErrorWithError creates a new Error conforming object from the // passed packageType, method, statusCode of the given resp (UndefinedStatusCode // if resp is nil), message, and original error. message is treated as a format @@ -178,16 +291,29 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { resp.Body = ioutil.NopCloser(&b) if decodeErr != nil { return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } else if e.ServiceError == nil { + } + if e.ServiceError == nil { // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { + return err } } - + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body as raw JSON in hopes of getting something. + rawBody := map[string]interface{}{} + if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { + return err + } + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp e.RequestID = ExtractRequestID(resp) if e.StatusCode == nil { e.StatusCode = resp.StatusCode diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go deleted file mode 100644 index e8bddbbfd4..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go +++ /dev/null @@ -1,513 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
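`ParseResourceID` above extracts the components of an ARM resource ID with one case-insensitive regex, taking the resource name from the last path segment. A usage sketch, assuming the package is imported under its canonical path `github.com/Azure/go-autorest/autorest/azure`; the subscription GUID and resource names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Placeholder ARM resource ID; the GUID and names are made up.
	id := "/subscriptions/00000000-0000-0000-0000-000000000000" +
		"/resourceGroups/rg1/providers/Microsoft.Network/loadBalancers/lb1"

	res, err := azure.ParseResourceID(id)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	// Prints: group=rg1 provider=Microsoft.Network type=loadBalancers name=lb1
	fmt.Printf("group=%s provider=%s type=%s name=%s\n",
		res.ResourceGroup, res.Provider, res.ResourceType, res.ResourceName)
}
```

Since `match[5]` is split on `/` and the last element wins, a nested child ID such as `.../virtualNetworks/vn1/subnets/s1` comes back with `ResourceName` set to `s1`.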
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - headerAuthorization = "Authorization" - longDelay = 5 * time.Second - retryDelay = 10 * time.Millisecond - testLogPrefix = "azure:" -) - -// Use a Client Inspector to set the request identifier. -func ExampleWithClientID() { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - req, _ := autorest.Prepare(&http.Request{}, - autorest.AsGet(), - autorest.WithBaseURL("https://microsoft.com/a/b/c/")) - - c := autorest.Client{Sender: mocks.NewSender()} - c.RequestInspector = WithReturningClientID(uuid) - - autorest.SendWithSender(c, req) - fmt.Printf("Inspector added the %s header with the value %s\n", - HeaderClientID, req.Header.Get(HeaderClientID)) - fmt.Printf("Inspector added the %s header with the value %s\n", - HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) - // Output: - // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 - // Inspector added the x-ms-return-client-request-id header with the value true -} - -func TestWithReturningClientIDReturnsError(t *testing.T) { - var errIn error - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - _, errOut := autorest.Prepare(&http.Request{}, - withErrorPrepareDecorator(&errIn), - WithReturningClientID(uuid)) - - if errOut == nil || errIn != errOut { - t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", - errIn, errOut) - } -} - -func TestWithClientID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - req, _ := autorest.Prepare(&http.Request{}, - WithClientID(uuid)) - - if req.Header.Get(HeaderClientID) != uuid { - t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s", - HeaderClientID, uuid, req.Header.Get(HeaderClientID)) - } -} - -func TestWithReturnClientID(t *testing.T) { - b := false - req, _ := autorest.Prepare(&http.Request{}, - WithReturnClientID(b)) - - if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { - t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", - HeaderClientID, strconv.FormatBool(b), req.Header.Get(HeaderClientID)) - } -} - -func TestExtractClientID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - resp := mocks.NewResponse() - mocks.SetResponseHeader(resp, HeaderClientID, uuid) - - if ExtractClientID(resp) != uuid { - t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", - HeaderClientID, uuid, ExtractClientID(resp)) - } -} - -func TestExtractRequestID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - resp := mocks.NewResponse() - mocks.SetResponseHeader(resp, HeaderRequestID, uuid) - - if ExtractRequestID(resp) != uuid { - t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", - HeaderRequestID, uuid, ExtractRequestID(resp)) - } -} - -func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) { - if !IsAzureError(&RequestError{}) { - t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error") - } -} - -func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) { - if IsAzureError(fmt.Errorf("An Error")) { - t.Fatalf("azure: IsAzureError return true for 
an non-Azure Service error") - } -} - -func TestNewErrorWithError_UsesReponseStatusCode(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message") - if e.StatusCode != http.StatusForbidden { - t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode) - } -} - -func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) { - e1 := RequestError{} - e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"} - e1.StatusCode = 200 - e1.RequestID = "A RequestID" - e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message") - - if !reflect.DeepEqual(e1, e2) { - t.Fatalf("azure: NewErrorWithError wrapped an RequestError -- expected %T, received %T", e1, e2) - } -} - -func TestNewErrorWithError_WrapsAnError(t *testing.T) { - e1 := fmt.Errorf("Inner Error") - var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") - - if _, ok := e2.(RequestError); !ok { - t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2) - } -} - -func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) { - body := ` - - IIS Error page - - Some non-JSON error page - ` - r := mocks.NewResponseWithContent(body) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusBadRequest - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - ok, _ := err.(*RequestError) - if ok != nil { - t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err) - } - - // the error body should still be there - defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(b) != body { - t.Fatalf("response body is wrong. got=%q exptected=%q", string(b), body) - } -} - -func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) { - j := `{ - "error": { - "code": "InternalError", - "message": "Azure is having trouble right now." - } - }` - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - r := mocks.NewResponseWithContent(j) - mocks.SetResponseHeader(r, HeaderRequestID, uuid) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusInternalServerError - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - - if err == nil { - t.Fatalf("azure: returned nil error for proper error response") - } - azErr, ok := err.(*RequestError) - if !ok { - t.Fatalf("azure: returned error is not azure.RequestError: %T", err) - } - - expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\"" - if !reflect.DeepEqual(expected, azErr.Error()) { - t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error()) - } - - if expected := http.StatusInternalServerError; azErr.StatusCode != expected { - t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected) - } - if expected := uuid; azErr.RequestID != expected { - t.Fatalf("azure: wrong request ID in error. 
expected=%q; got=%q", expected, azErr.RequestID) - } - - _ = azErr.Error() - - // the error body should still be there - defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(b) != j { - t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) - } - -} - -func TestWithErrorUnlessStatusCode_FoundAzureErrorWithDetails(t *testing.T) { - j := `{ - "error": { - "code": "InternalError", - "message": "Azure is having trouble right now.", - "details": [{"code": "conflict1", "message":"error message1"}, - {"code": "conflict2", "message":"error message2"}] - } - }` - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - r := mocks.NewResponseWithContent(j) - mocks.SetResponseHeader(r, HeaderRequestID, uuid) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusInternalServerError - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - - if err == nil { - t.Fatalf("azure: returned nil error for proper error response") - } - azErr, ok := err.(*RequestError) - if !ok { - t.Fatalf("azure: returned error is not azure.RequestError: %T", err) - } - - if expected := "InternalError"; azErr.ServiceError.Code != expected { - t.Fatalf("azure: wrong error code. expected=%q; got=%q", expected, azErr.ServiceError.Code) - } - if azErr.ServiceError.Message == "" { - t.Fatalf("azure: error message is not unmarshaled properly") - } - b, _ := json.Marshal(*azErr.ServiceError.Details) - if string(b) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` { - t.Fatalf("azure: error details is not unmarshaled properly") - } - - if expected := http.StatusInternalServerError; azErr.StatusCode != expected { - t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) - } - if expected := uuid; azErr.RequestID != expected { - t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) - } - - _ = azErr.Error() - - // the error body should still be there - defer r.Body.Close() - b, err = ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(b) != j { - t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) - } - -} - -func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) { - j := `{ - "Status":"NotFound" - }` - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - r := mocks.NewResponseWithContent(j) - mocks.SetResponseHeader(r, HeaderRequestID, uuid) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusInternalServerError - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - if err == nil { - t.Fatalf("azure: returned nil error for proper error response") - } - azErr, ok := err.(*RequestError) - if !ok { - t.Fatalf("azure: returned error is not azure.RequestError: %T", err) - } - - expected := &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - - if !reflect.DeepEqual(expected, azErr.ServiceError) { - t.Fatalf("azure: service error is not unmarshaled properly. expected=%q\ngot=%q", expected, azErr.ServiceError) - } - - if expected := http.StatusInternalServerError; azErr.StatusCode != expected { - t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) - } - if expected := uuid; azErr.RequestID != expected { - t.Fatalf("azure: wrong request ID in error. 
expected=%q; got=%q", expected, azErr.RequestID) - } - - _ = azErr.Error() - - // the error body should still be there - defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(b) != j { - t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) - } - -} - -func TestWithErrorUnlessStatusCode_UnwrappedError(t *testing.T) { - j := `{ - "target": null, - "code": "InternalError", - "message": "Azure is having trouble right now.", - "details": [{"code": "conflict1", "message":"error message1"}, - {"code": "conflict2", "message":"error message2"}], - "innererror": [] -}` - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - r := mocks.NewResponseWithContent(j) - mocks.SetResponseHeader(r, HeaderRequestID, uuid) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusInternalServerError - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - - if err == nil { - t.Fatal("azure: returned nil error for proper error response") - } - - azErr, ok := err.(*RequestError) - if !ok { - t.Fatalf("returned error is not azure.RequestError: %T", err) - } - - if expected := http.StatusInternalServerError; azErr.StatusCode != expected { - t.Logf("Incorrect StatusCode got: %v want: %d", azErr.StatusCode, expected) - t.Fail() - } - - if expected := "Azure is having trouble right now."; azErr.ServiceError.Message != expected { - t.Logf("Incorrect Message\n\tgot: %q\n\twant: %q", azErr.Message, expected) - t.Fail() - } - - if expected := uuid; azErr.RequestID != expected { - t.Logf("Incorrect request ID\n\tgot: %q\n\twant: %q", azErr.RequestID, expected) - t.Fail() - } - - expectedServiceErrorDetails := `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` - if azErr.ServiceError == nil { - t.Logf("`ServiceError` was nil when it shouldn't have been.") - t.Fail() - } else if azErr.ServiceError.Details == nil { - t.Logf("`ServiceError.Details` was nil when it should have been %q", expectedServiceErrorDetails) - t.Fail() - } else if details, _ := json.Marshal(*azErr.ServiceError.Details); expectedServiceErrorDetails != string(details) { - t.Logf("Error detaisl was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(details), expectedServiceErrorDetails) - t.Fail() - } - - // the error body should still be there - defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Error(err) - } - if string(b) != j { - t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) - } - -} - -func TestRequestErrorString_WithError(t *testing.T) { - j := `{ - "error": { - "code": "InternalError", - "message": "Conflict", - "details": [{"code": "conflict1", "message":"error message1"}] - } - }` - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - r := mocks.NewResponseWithContent(j) - mocks.SetResponseHeader(r, HeaderRequestID, uuid) - r.Request = mocks.NewRequest() - r.StatusCode = http.StatusInternalServerError - r.Status = http.StatusText(r.StatusCode) - - err := autorest.Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - - if err == nil { - t.Fatalf("azure: returned nil error for proper error response") - } - azErr, _ := err.(*RequestError) - expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]" - if expected != azErr.Error() { - t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) - } -} - -func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - *e = fmt.Errorf("azure: Faux Prepare Error") - return r, *e - }) - } -} - -func withAsyncResponseDecorator(n int) autorest.SendDecorator { - i := 0 - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil { - if i < n { - resp.StatusCode = http.StatusCreated - resp.Header = http.Header{} - resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL) - i++ - } else { - resp.StatusCode = http.StatusOK - resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) - } - } - return resp, err - }) - } -} - -type mockAuthorizer struct{} - -func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator { - return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) -} - -type mockFailingAuthorizer struct{} - -func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") - }) - } -} - -type mockInspector struct { - wasInvoked bool -} - -func (mi *mockInspector) WithInspection() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - mi.wasInvoked = true - return p.Prepare(r) - }) - } -} - -func (mi *mockInspector) ByInspecting() autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - mi.wasInvoked = true - return r.Respond(resp) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index efdab6a110..85d3202afe 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -44,6 +44,8 @@ type Environment struct { GalleryEndpoint string `json:"galleryEndpoint"` KeyVaultEndpoint string `json:"keyVaultEndpoint"` GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` StorageEndpointSuffix string `json:"storageEndpointSuffix"` SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` @@ -52,6 +54,8 @@ type Environment struct { ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` } var ( @@ -66,14 +70,18 @@ var ( GalleryEndpoint: "https://gallery.azure.com/", KeyVaultEndpoint: "https://vault.azure.net/", 
GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", StorageEndpointSuffix: "core.windows.net", SQLDatabaseDNSSuffix: "database.windows.net", TrafficManagerDNSSuffix: "trafficmanager.net", KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceBusEndpointSuffix: "servicebus.windows.net", ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", } // USGovernmentCloud is the cloud environment for the US Government @@ -83,10 +91,12 @@ var ( PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", GalleryEndpoint: "https://gallery.usgovcloudapi.net/", KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", StorageEndpointSuffix: "core.usgovcloudapi.net", SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", TrafficManagerDNSSuffix: "usgovtrafficmanager.net", @@ -94,7 +104,9 @@ var ( ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.io", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", } // ChinaCloud is the cloud environment operated in China @@ -108,14 +120,18 @@ var ( GalleryEndpoint: "https://gallery.chinacloudapi.cn/", KeyVaultEndpoint: "https://vault.azure.cn/", GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", StorageEndpointSuffix: "core.chinacloudapi.cn", SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", TrafficManagerDNSSuffix: "trafficmanager.cn", KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", } // GermanCloud is the cloud environment operated in Germany @@ -129,6 +145,8 @@ var ( GalleryEndpoint: "https://gallery.cloudapi.de/", KeyVaultEndpoint: "https://vault.microsoftazure.de/", GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", StorageEndpointSuffix: "core.cloudapi.de", SQLDatabaseDNSSuffix: "database.cloudapi.de", TrafficManagerDNSSuffix: "azuretrafficmanager.de", @@ -136,7 +154,9 @@ var ( ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", 
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", + // ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", } ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go deleted file mode 100644 index 9cee30b7d0..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// test -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "os" - "path" - "path/filepath" - "runtime" - "testing" -) - -// This correlates to the expected contents of ./testdata/test_environment_1.json -var testEnvironment1 = Environment{ - Name: "--unit-test--", - ManagementPortalURL: "--management-portal-url", - PublishSettingsURL: "--publish-settings-url--", - ServiceManagementEndpoint: "--service-management-endpoint--", - ResourceManagerEndpoint: "--resource-management-endpoint--", - ActiveDirectoryEndpoint: "--active-directory-endpoint--", - GalleryEndpoint: "--gallery-endpoint--", - KeyVaultEndpoint: "--key-vault--endpoint--", - GraphEndpoint: "--graph-endpoint--", - StorageEndpointSuffix: "--storage-endpoint-suffix--", - SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", - TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", - KeyVaultDNSSuffix: "--key-vault-dns-suffix--", - ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", - ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", - ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", - ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", -} - -func TestEnvironment_EnvironmentFromFile(t *testing.T) { - got, err := EnvironmentFromFile(filepath.Join("testdata", "test_environment_1.json")) - if err != nil { - t.Error(err) - } - - if got != testEnvironment1 { - t.Logf("got: %v want: %v", got, testEnvironment1) - t.Fail() - } -} - -func TestEnvironment_EnvironmentFromName_Stack(t *testing.T) { - _, currentFile, _, _ := runtime.Caller(0) - prevEnvFilepathValue := os.Getenv(EnvironmentFilepathName) - os.Setenv(EnvironmentFilepathName, filepath.Join(path.Dir(currentFile), "testdata", "test_environment_1.json")) - defer os.Setenv(EnvironmentFilepathName, prevEnvFilepathValue) - - got, err := EnvironmentFromName("AZURESTACKCLOUD") - if err != nil { - t.Error(err) - } - - if got != testEnvironment1 { - t.Logf("got: %v want: %v", got, testEnvironment1) - t.Fail() - } -} - -func TestEnvironmentFromName(t *testing.T) { - name := "azurechinacloud" - if env, _ := EnvironmentFromName(name); env != ChinaCloud { - t.Errorf("Expected to get ChinaCloud for %q", name) - } - - name = "AzureChinaCloud" - if env, _ := EnvironmentFromName(name); env != ChinaCloud { - t.Errorf("Expected to get ChinaCloud 
for %q", name) - } - - name = "azuregermancloud" - if env, _ := EnvironmentFromName(name); env != GermanCloud { - t.Errorf("Expected to get GermanCloud for %q", name) - } - - name = "AzureGermanCloud" - if env, _ := EnvironmentFromName(name); env != GermanCloud { - t.Errorf("Expected to get GermanCloud for %q", name) - } - - name = "azurepubliccloud" - if env, _ := EnvironmentFromName(name); env != PublicCloud { - t.Errorf("Expected to get PublicCloud for %q", name) - } - - name = "AzurePublicCloud" - if env, _ := EnvironmentFromName(name); env != PublicCloud { - t.Errorf("Expected to get PublicCloud for %q", name) - } - - name = "azureusgovernmentcloud" - if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { - t.Errorf("Expected to get USGovernmentCloud for %q", name) - } - - name = "AzureUSGovernmentCloud" - if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { - t.Errorf("Expected to get USGovernmentCloud for %q", name) - } - - name = "thisisnotarealcloudenv" - if _, err := EnvironmentFromName(name); err == nil { - t.Errorf("Expected to get an error for %q", name) - } -} - -func TestDeserializeEnvironment(t *testing.T) { - env := `{ - "name": "--name--", - "ActiveDirectoryEndpoint": "--active-directory-endpoint--", - "galleryEndpoint": "--gallery-endpoint--", - "graphEndpoint": "--graph-endpoint--", - "keyVaultDNSSuffix": "--key-vault-dns-suffix--", - "keyVaultEndpoint": "--key-vault-endpoint--", - "managementPortalURL": "--management-portal-url--", - "publishSettingsURL": "--publish-settings-url--", - "resourceManagerEndpoint": "--resource-manager-endpoint--", - "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", - "serviceManagementEndpoint": "--service-management-endpoint--", - "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", - "storageEndpointSuffix": "--storage-endpoint-suffix--", - "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", - "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", - "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--", - "containerRegistryDNSSuffix": "--container-registry-dns-suffix--" - }` - - testSubject := Environment{} - err := json.Unmarshal([]byte(env), &testSubject) - if err != nil { - t.Fatalf("failed to unmarshal: %s", err) - } - - if "--name--" != testSubject.Name { - t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name) - } - if "--management-portal-url--" != testSubject.ManagementPortalURL { - t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL) - } - if "--publish-settings-url--" != testSubject.PublishSettingsURL { - t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL) - } - if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint { - t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint) - } - if "--resource-manager-endpoint--" != testSubject.ResourceManagerEndpoint { - t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint) - } - if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint { - t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint) - } - if "--gallery-endpoint--" != testSubject.GalleryEndpoint { - t.Errorf("Expected GalleryEndpoint to be 
\"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint) - } - if "--key-vault-endpoint--" != testSubject.KeyVaultEndpoint { - t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", testSubject.KeyVaultEndpoint) - } - if "--graph-endpoint--" != testSubject.GraphEndpoint { - t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint) - } - if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix { - t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix) - } - if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix { - t.Errorf("Expected sql-database-dns-suffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix) - } - if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix { - t.Errorf("Expected StorageEndpointSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix) - } - if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix { - t.Errorf("Expected StorageEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix) - } - if "--asm-vm-dns-suffix--" != testSubject.ServiceManagementVMDNSSuffix { - t.Errorf("Expected ServiceManagementVMDNSSuffix to be \"--asm-vm-dns-suffix--\", but got %q", testSubject.ServiceManagementVMDNSSuffix) - } - if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix { - t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix) - } - if "--container-registry-dns-suffix--" != testSubject.ContainerRegistryDNSSuffix { - t.Errorf("Expected ContainerRegistryDNSSuffix to be \"--container-registry-dns-suffix--\", but got %q", testSubject.ContainerRegistryDNSSuffix) - } -} - -func TestRoundTripSerialization(t *testing.T) { - env := Environment{ - Name: "--unit-test--", - ManagementPortalURL: "--management-portal-url", - PublishSettingsURL: "--publish-settings-url--", - ServiceManagementEndpoint: "--service-management-endpoint--", - ResourceManagerEndpoint: "--resource-management-endpoint--", - ActiveDirectoryEndpoint: "--active-directory-endpoint--", - GalleryEndpoint: "--gallery-endpoint--", - KeyVaultEndpoint: "--key-vault--endpoint--", - GraphEndpoint: "--graph-endpoint--", - StorageEndpointSuffix: "--storage-endpoint-suffix--", - SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", - TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", - KeyVaultDNSSuffix: "--key-vault-dns-suffix--", - ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", - ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", - ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", - ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", - } - - bytes, err := json.Marshal(env) - if err != nil { - t.Fatalf("failed to marshal: %s", err) - } - - testSubject := Environment{} - err = json.Unmarshal(bytes, &testSubject) - if err != nil { - t.Fatalf("failed to unmarshal: %s", err) - } - - if env.Name != testSubject.Name { - t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name) - } - if env.ManagementPortalURL != testSubject.ManagementPortalURL { - t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL) - } - if env.PublishSettingsURL != testSubject.PublishSettingsURL { - t.Errorf("Expected 
PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL) - } - if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint { - t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint) - } - if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint { - t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint) - } - if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint { - t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint) - } - if env.GalleryEndpoint != testSubject.GalleryEndpoint { - t.Errorf("Expected GalleryEndpoint to be %q, but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint) - } - if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint { - t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint) - } - if env.GraphEndpoint != testSubject.GraphEndpoint { - t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint) - } - if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix { - t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix) - } - if env.SQLDatabaseDNSSuffix != testSubject.SQLDatabaseDNSSuffix { - t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix) - } - if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix { - t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix) - } - if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix { - t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix) - } - if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix { - t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix) - } - if env.ServiceManagementVMDNSSuffix != testSubject.ServiceManagementVMDNSSuffix { - t.Errorf("Expected ServiceManagementVMDNSSuffix to be %q, but got %q", env.ServiceManagementVMDNSSuffix, testSubject.ServiceManagementVMDNSSuffix) - } - if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix { - t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix) - } - if env.ContainerRegistryDNSSuffix != testSubject.ContainerRegistryDNSSuffix { - t.Errorf("Expected ContainerRegistryDNSSuffix to be %q, but got %q", env.ContainerRegistryDNSSuffix, testSubject.ContainerRegistryDNSSuffix) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 0000000000..507f9e95cf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represents property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL ... + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ...
+ EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents a property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL. +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case
EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go index 66d1c8c2b3..86ce9f2b5b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -44,7 +44,7 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { return resp, err } - if resp.StatusCode != http.StatusConflict { + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { return resp, err } var re RequestError @@ -64,17 +64,14 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { } } } - return resp, fmt.Errorf("failed request: %s", err) + return resp, err }) } } func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil { - if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 { - detail := (*re.ServiceError.Details)[0].(map[string]interface{}) - return detail["target"].(string), nil - } + if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0]["target"].(string), nil } return "", errors.New("provider was not found in the response") } @@ -118,7 +115,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError if err != nil { return err } - req.Cancel = originalReq.Cancel + req = req.WithContext(originalReq.Context()) resp, err := autorest.SendWithSender(client, req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), @@ -143,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError } // poll for registered provisioning state - now := time.Now() - for err == nil && time.Since(now) < client.PollingDuration { + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { // taken from the resources SDK // 
https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 preparer := autorest.CreatePreparer( @@ -157,9 +154,9 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError if err != nil { return err } - req.Cancel = originalReq.Cancel + req = req.WithContext(originalReq.Context()) - resp, err := autorest.SendWithSender(client.Sender, req, + resp, err := autorest.SendWithSender(client, req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), ) if err != nil { @@ -181,12 +178,12 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError break } - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel) - if !delayed { - autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel) + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() } } - if !(time.Since(now) < client.PollingDuration) { + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { return errors.New("polling for resource provider registration has exceeded the polling duration") } return err diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go deleted file mode 100644 index b069430578..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "net/http" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/mocks" -) - -func TestDoRetryWithRegistration(t *testing.T) { - client := mocks.NewSender() - // first response, should retry because it is a transient error - client.AppendResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError)) - // response indicates the resource provider has not been registered - client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{ - "error":{ - "code":"MissingSubscriptionRegistration", - "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions.", - "details":[ - { - "code":"MissingSubscriptionRegistration", - "target":"Microsoft.EventGrid", - "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions." 
-				}
-			]
-		}
-}
-`), http.StatusConflict, "MissingSubscriptionRegistration"))
-	// first poll response, still not ready
-	client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
-	"registrationState": "Registering"
-}
-`), http.StatusOK, "200 OK"))
-	// last poll response, resource provider has been registered
-	client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
-	"registrationState": "Registered"
-}
-`), http.StatusOK, "200 OK"))
-	// retry original request, response is successful
-	client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK))
-
-	req := mocks.NewRequestForURL("https://lol/subscriptions/rofl")
-	req.Body = mocks.NewBody("lolol")
-	r, err := autorest.SendWithSender(client, req,
-		DoRetryWithRegistration(autorest.Client{
-			PollingDelay:    time.Second,
-			PollingDuration: time.Second * 10,
-			RetryAttempts:   5,
-			RetryDuration:   time.Second,
-			Sender:          client,
-		}),
-	)
-	if err != nil {
-		t.Fatalf("got error: %v", err)
-	}
-
-	autorest.Respond(r,
-		autorest.ByDiscardingBody(),
-		autorest.ByClosing(),
-	)
-
-	if r.StatusCode != http.StatusOK {
-		t.Fatalf("azure: Sender#DoRetryWithRegistration -- Got: StatusCode %v; Want: StatusCode 200 OK", r.StatusCode)
-	}
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index 9eebd83bf8..9520001fc5 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -16,14 +16,18 @@ package autorest
 import (
 	"bytes"
+	"crypto/tls"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"net/http"
 	"net/http/cookiejar"
-	"runtime"
+	"strings"
 	"time"
+
+	"github.com/Azure/go-autorest/logger"
+	"github.com/Azure/go-autorest/tracing"
 )
 
 const (
@@ -41,15 +45,6 @@ const (
 )
 
 var (
-	// defaultUserAgent builds a string containing the Go version, system architecture and OS,
-	// and the go-autorest version.
-	defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
-		runtime.Version(),
-		runtime.GOARCH,
-		runtime.GOOS,
-		Version(),
-	)
-
 	// StatusCodesForRetry are a defined group of status code for which the client will retry
 	StatusCodesForRetry = []int{
 		http.StatusRequestTimeout, // 408
@@ -153,6 +148,7 @@ type Client struct {
 	PollingDelay time.Duration
 
 	// PollingDuration sets the maximum polling time after which an error is returned.
+	// Setting this to zero will use the provided context to control the duration.
 	PollingDuration time.Duration
 
 	// RetryAttempts sets the default number of retry attempts for client.
@@ -166,6 +162,9 @@ type Client struct {
 	UserAgent string
 
 	Jar http.CookieJar
+
+	// Set to true to skip attempted registration of resource providers (false by default).
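+	// Illustrative note (editor's addition, not in the original change): a client
+	// built as autorest.Client{SkipResourceProviderRegistration: true} will return
+	// 409 MissingSubscriptionRegistration responses to the caller unchanged,
+	// because DoRetryWithRegistration in rp.go now checks this flag before
+	// attempting to register the provider and retry.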
+ SkipResourceProviderRegistration bool } // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed @@ -176,7 +175,7 @@ func NewClientWithUserAgent(ua string) Client { PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: DefaultRetryDuration, - UserAgent: defaultUserAgent, + UserAgent: UserAgent(), } c.Sender = c.sender() c.AddToUserAgent(ua) @@ -200,14 +199,30 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { r, _ = Prepare(r, WithUserAgent(c.UserAgent)) } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations r, err := Prepare(r, - c.WithInspection(), - c.WithAuthorization()) + c.WithAuthorization(), + c.WithInspection()) if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) resp, err := SendWithSender(c.sender(), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } @@ -215,9 +230,25 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { // sender returns the Sender to which to send requests. func (c Client) sender() Sender { if c.Sender == nil { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + var defaultTransport = http.DefaultTransport.(*http.Transport) + + tracing.Transport.Base = &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} + return &http.Client{Jar: j, Transport: tracing.Transport} } + return c.Sender } diff --git a/vendor/github.com/Azure/go-autorest/autorest/client_test.go b/vendor/github.com/Azure/go-autorest/autorest/client_test.go deleted file mode 100644 index 151f8eedb2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "net/http/httptest" - "reflect" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -func TestLoggingInspectorWithInspection(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.RequestInspector = li.WithInspection() - - Prepare(mocks.NewRequestWithContent("Content"), - c.WithInspection()) - - if len(b.String()) <= 0 { - t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") - } -} - -func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewRequestWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.RequestInspector = li.WithInspection() - - if _, err := Prepare(r, - c.WithInspection()); err != nil { - t.Error(err) - } - - if len(b.String()) <= 0 { - t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") - } -} - -func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewRequestWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.RequestInspector = li.WithInspection() - - Prepare(r, - c.WithInspection()) - - s, _ := ioutil.ReadAll(r.Body) - if len(s) <= 0 { - t.Fatal("autorest: LoggingInspector#WithInspection did not restore the Request body") - } -} - -func TestLoggingInspectorByInspecting(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - Respond(mocks.NewResponseWithContent("Content"), - c.ByInspecting()) - - if len(b.String()) <= 0 { - t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") - } -} - -func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewResponseWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - if err := Respond(r, - c.ByInspecting()); err != nil { - t.Fatal(err) - } - - if len(b.String()) <= 0 { - t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") - } -} - -func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewResponseWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - Respond(r, - c.ByInspecting()) - - s, _ := ioutil.ReadAll(r.Body) - if len(s) <= 0 { - t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body") - } -} - -func TestNewClientWithUserAgent(t *testing.T) { - ua := "UserAgent" - c := NewClientWithUserAgent(ua) - completeUA := fmt.Sprintf("%s %s", defaultUserAgent, ua) - - if c.UserAgent != completeUA { - t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", - completeUA, c.UserAgent) - } -} - -func TestAddToUserAgent(t *testing.T) { - ua := "UserAgent" - c := NewClientWithUserAgent(ua) - ext := "extension" - err := c.AddToUserAgent(ext) - if err != nil { - t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err) - } - completeUA := fmt.Sprintf("%s %s %s", defaultUserAgent, ua, ext) - - if c.UserAgent != completeUA { - t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, 
received %s", - completeUA, c.UserAgent) - } - - err = c.AddToUserAgent("") - if err == nil { - t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil", - fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)) - } - if c.UserAgent != completeUA { - t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s", - completeUA, c.UserAgent) - } -} - -func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { - c := Client{} - - if fmt.Sprintf("%T", c.sender()) != "*http.Client" { - t.Fatal("autorest: Client#sender failed to return http.Client by default") - } -} - -func TestClientSenderReturnsSetSender(t *testing.T) { - c := Client{} - - s := mocks.NewSender() - c.Sender = s - - if c.sender() != s { - t.Fatal("autorest: Client#sender failed to return set Sender") - } -} - -func TestClientDoInvokesSender(t *testing.T) { - c := Client{} - - s := mocks.NewSender() - c.Sender = s - - c.Do(&http.Request{}) - if s.Attempts() != 1 { - t.Fatal("autorest: Client#Do failed to invoke the Sender") - } -} - -func TestClientDoSetsUserAgent(t *testing.T) { - ua := "UserAgent" - c := Client{UserAgent: ua} - r := mocks.NewRequest() - s := mocks.NewSender() - c.Sender = s - - c.Do(r) - - if r.UserAgent() != ua { - t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", - http.CanonicalHeaderKey(headerUserAgent), r.UserAgent()) - } -} - -func TestClientDoSetsAuthorization(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Authorizer: mockAuthorizer{}, Sender: s} - - c.Do(r) - if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { - t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s", - http.CanonicalHeaderKey(headerAuthorization), - r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) - } -} - -func TestClientDoInvokesRequestInspector(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - i := &mockInspector{} - c := Client{RequestInspector: i.WithInspection(), Sender: s} - - c.Do(r) - if !i.wasInvoked { - t.Fatal("autorest: Client#Send failed to invoke the RequestInspector") - } -} - -func TestClientDoInvokesResponseInspector(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - i := &mockInspector{} - c := Client{ResponseInspector: i.ByInspecting(), Sender: s} - - c.Do(r) - if !i.wasInvoked { - t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector") - } -} - -func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) { - c := Client{} - s := mocks.NewSender() - c.Authorizer = mockFailingAuthorizer{} - c.Sender = s - - _, err := c.Do(&http.Request{}) - if err == nil { - t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed") - } -} - -func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) { - c := Client{} - s := mocks.NewSender() - c.Authorizer = mockFailingAuthorizer{} - c.Sender = s - - c.Do(&http.Request{}) - if s.Attempts() > 0 { - t.Fatal("autorest: Client#Do failed to invoke the Sender") - } -} - -func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { - c := Client{} - - if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { - t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default") - } -} - -func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { - c := Client{} - c.Authorizer = mockAuthorizer{} - - if fmt.Sprintf("%T", c.authorizer()) != 
"autorest.mockAuthorizer" { - t.Fatal("autorest: Client#authorizer failed to return the set Authorizer") - } -} - -func TestClientWithAuthorizer(t *testing.T) { - c := Client{} - c.Authorizer = mockAuthorizer{} - - req, _ := Prepare(&http.Request{}, - c.WithAuthorization()) - - if req.Header.Get(headerAuthorization) == "" { - t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") - } -} - -func TestClientWithInspection(t *testing.T) { - c := Client{} - r := &mockInspector{} - c.RequestInspector = r.WithInspection() - - Prepare(&http.Request{}, - c.WithInspection()) - - if !r.wasInvoked { - t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector") - } -} - -func TestClientWithInspectionSetsDefault(t *testing.T) { - c := Client{} - - r1 := &http.Request{} - r2, _ := Prepare(r1, - c.WithInspection()) - - if !reflect.DeepEqual(r1, r2) { - t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector") - } -} - -func TestClientByInspecting(t *testing.T) { - c := Client{} - r := &mockInspector{} - c.ResponseInspector = r.ByInspecting() - - Respond(&http.Response{}, - c.ByInspecting()) - - if !r.wasInvoked { - t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector") - } -} - -func TestClientByInspectingSetsDefault(t *testing.T) { - c := Client{} - - r := &http.Response{} - Respond(r, - c.ByInspecting()) - - if !reflect.DeepEqual(r, &http.Response{}) { - t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector") - } -} - -func TestCookies(t *testing.T) { - second := "second" - expected := http.Cookie{ - Name: "tastes", - Value: "delicious", - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.SetCookie(w, &expected) - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: ioutil.ReadAll failed reading request body: %s", err) - } - if string(b) == second { - cookie, err := r.Cookie(expected.Name) - if err != nil { - t.Fatalf("autorest: r.Cookie could not get request cookie: %s", err) - } - if cookie == nil { - t.Fatalf("autorest: got nil cookie, expecting %v", expected) - } - if cookie.Value != expected.Value { - t.Fatalf("autorest: got cookie value '%s', expecting '%s'", cookie.Value, expected.Name) - } - } - })) - defer server.Close() - - client := NewClientWithUserAgent("") - _, err := SendWithSender(client, mocks.NewRequestForURL(server.URL)) - if err != nil { - t.Fatalf("autorest: first request failed: %s", err) - } - - r2, err := http.NewRequest(http.MethodGet, server.URL, mocks.NewBody(second)) - if err != nil { - t.Fatalf("autorest: failed creating second request: %s", err) - } - - _, err = SendWithSender(client, r2) - if err != nil { - t.Fatalf("autorest: second request failed: %s", err) - } -} - -func randomString(n int) string { - const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) - s := make([]byte, n) - for i := range s { - s[i] = chars[r.Intn(len(chars))] - } - return string(s) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go deleted file mode 100644 index 4a40bbc3e1..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleParseDate() { - d, err := ParseDate("2001-02-03") - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03 -} - -func ExampleDate() { - d, err := ParseDate("2001-02-03") - if err != nil { - fmt.Println(err) - } - - t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") - if err != nil { - fmt.Println(err) - } - - // Date acts as time.Time when the receiver - if d.Before(t) { - fmt.Printf("Before ") - } else { - fmt.Printf("After ") - } - - // Convert Date when needing a time.Time - if t.After(d.ToTime()) { - fmt.Printf("After") - } else { - fmt.Printf("Before") - } - // Output: Before After -} - -func ExampleDate_MarshalBinary() { - d, err := ParseDate("2001-02-03") - if err != nil { - fmt.Println(err) - } - t, err := d.MarshalBinary() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(t)) - // Output: 2001-02-03 -} - -func ExampleDate_UnmarshalBinary() { - d := Date{} - t := "2001-02-03" - - if err := d.UnmarshalBinary([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03 -} - -func ExampleDate_MarshalJSON() { - d, err := ParseDate("2001-02-03") - if err != nil { - fmt.Println(err) - } - j, err := json.Marshal(d) - if err != nil { - fmt.Println(err) - } - fmt.Println(string(j)) - // Output: "2001-02-03" -} - -func ExampleDate_UnmarshalJSON() { - var d struct { - Date Date `json:"date"` - } - j := `{"date" : "2001-02-03"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - fmt.Println(err) - } - fmt.Println(d.Date) - // Output: 2001-02-03 -} - -func ExampleDate_MarshalText() { - d, err := ParseDate("2001-02-03") - if err != nil { - fmt.Println(err) - } - t, err := d.MarshalText() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(t)) - // Output: 2001-02-03 -} - -func ExampleDate_UnmarshalText() { - d := Date{} - t := "2001-02-03" - - if err := d.UnmarshalText([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03 -} - -func TestDateString(t *testing.T) { - d, err := ParseDate("2001-02-03") - if err != nil { - t.Fatalf("date: String failed (%v)", err) - } - if d.String() != "2001-02-03" { - t.Fatalf("date: String failed (%v)", d.String()) - } -} - -func TestDateBinaryRoundTrip(t *testing.T) { - d1, err := ParseDate("2001-02-03") - if err != nil { - t.Fatalf("date: ParseDate failed (%v)", err) - } - t1, err := d1.MarshalBinary() - if err != nil { - t.Fatalf("date: MarshalBinary failed (%v)", err) - } - - d2 := Date{} - if err = d2.UnmarshalBinary(t1); err != nil { - t.Fatalf("date: UnmarshalBinary failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) - } -} - -func TestDateJSONRoundTrip(t *testing.T) { - type s struct { - Date Date `json:"date"` - } - var err error - d1 := s{} - d1.Date, err = ParseDate("2001-02-03") - if err != nil { - t.Fatalf("date: ParseDate failed (%v)", err) - } - - 
j, err := json.Marshal(d1) - if err != nil { - t.Fatalf("date: MarshalJSON failed (%v)", err) - } - - d2 := s{} - if err = json.Unmarshal(j, &d2); err != nil { - t.Fatalf("date: UnmarshalJSON failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) - } -} - -func TestDateTextRoundTrip(t *testing.T) { - d1, err := ParseDate("2001-02-03") - if err != nil { - t.Fatalf("date: ParseDate failed (%v)", err) - } - t1, err := d1.MarshalText() - if err != nil { - t.Fatalf("date: MarshalText failed (%v)", err) - } - d2 := Date{} - if err = d2.UnmarshalText(t1); err != nil { - t.Fatalf("date: UnmarshalText failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) - } -} - -func TestDateToTime(t *testing.T) { - var d Date - d, err := ParseDate("2001-02-03") - if err != nil { - t.Fatalf("date: ParseDate failed (%v)", err) - } - var _ time.Time = d.ToTime() -} - -func TestDateUnmarshalJSONReturnsError(t *testing.T) { - var d struct { - Date Date `json:"date"` - } - j := `{"date" : "February 3, 2001"}` - - if err := json.Unmarshal([]byte(j), &d); err == nil { - t.Fatal("date: Date failed to return error for malformed JSON date") - } -} - -func TestDateUnmarshalTextReturnsError(t *testing.T) { - d := Date{} - txt := "February 3, 2001" - - if err := d.UnmarshalText([]byte(txt)); err == nil { - t.Fatal("date: Date failed to return error for malformed Text date") - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go deleted file mode 100644 index d8c811707a..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go +++ /dev/null @@ -1,277 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleParseTime() { - d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - fmt.Println(d) - // Output: 2001-02-03 04:05:06 +0000 UTC -} - -func ExampleTime_MarshalBinary() { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - fmt.Println(err) - } - d := Time{ti} - t, err := d.MarshalBinary() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(t)) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_UnmarshalBinary() { - d := Time{} - t := "2001-02-03T04:05:06Z" - - if err := d.UnmarshalBinary([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_MarshalJSON() { - d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - fmt.Println(err) - } - j, err := json.Marshal(d) - if err != nil { - fmt.Println(err) - } - fmt.Println(string(j)) - // Output: "2001-02-03T04:05:06Z" -} - -func ExampleTime_UnmarshalJSON() { - var d struct { - Time Time `json:"datetime"` - } - j := `{"datetime" : "2001-02-03T04:05:06Z"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - fmt.Println(err) - } - fmt.Println(d.Time) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_MarshalText() { - d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - fmt.Println(err) - } - t, err := d.MarshalText() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(t)) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_UnmarshalText() { - d := Time{} - t := "2001-02-03T04:05:06Z" - - if err := d.UnmarshalText([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03T04:05:06Z -} - -func TestUnmarshalTextforInvalidDate(t *testing.T) { - d := Time{} - dt := "2001-02-03T04:05:06AAA" - - if err := d.UnmarshalText([]byte(dt)); err == nil { - t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") - } -} - -func TestUnmarshalJSONforInvalidDate(t *testing.T) { - d := Time{} - dt := `"2001-02-03T04:05:06AAA"` - - if err := d.UnmarshalJSON([]byte(dt)); err == nil { - t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") - } -} - -func TestTimeString(t *testing.T) { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - fmt.Println(err) - } - d := Time{ti} - if d.String() != "2001-02-03T04:05:06Z" { - t.Fatalf("date: Time#String failed (%v)", d.String()) - } -} - -func TestTimeStringReturnsEmptyStringForError(t *testing.T) { - d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} - if d.String() != "" { - t.Fatalf("date: Time#String failed empty string for an error") - } -} - -func TestTimeBinaryRoundTrip(t *testing.T) { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - t.Fatalf("date: Time#ParseTime failed (%v)", err) - } - d1 := Time{ti} - t1, err := d1.MarshalBinary() - if err != nil { - t.Fatalf("date: Time#MarshalBinary failed (%v)", err) - } - - d2 := Time{} - if err = d2.UnmarshalBinary(t1); err != nil { - t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2) - } -} - -func TestTimeJSONRoundTrip(t *testing.T) { - type s struct { - Time Time `json:"datetime"` - } - - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - t.Fatalf("date: Time#ParseTime failed (%v)", err) - } - - d1 := s{Time: Time{ti}} - j, err := json.Marshal(d1) - if err != nil { - 
t.Fatalf("date: Time#MarshalJSON failed (%v)", err) - } - - d2 := s{} - if err = json.Unmarshal(j, &d2); err != nil { - t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) - } -} - -func TestTimeTextRoundTrip(t *testing.T) { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - t.Fatalf("date: Time#ParseTime failed (%v)", err) - } - d1 := Time{Time: ti} - t1, err := d1.MarshalText() - if err != nil { - t.Fatalf("date: Time#MarshalText failed (%v)", err) - } - - d2 := Time{} - if err = d2.UnmarshalText(t1); err != nil { - t.Fatalf("date: Time#UnmarshalText failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) - } -} - -func TestTimeToTime(t *testing.T) { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - d := Time{ti} - if err != nil { - t.Fatalf("date: Time#ParseTime failed (%v)", err) - } - var _ time.Time = d.ToTime() -} - -func TestUnmarshalJSONNoOffset(t *testing.T) { - var d struct { - Time Time `json:"datetime"` - } - j := `{"datetime" : "2001-02-03T04:05:06.789"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - t.Fatalf("date: Time#Unmarshal failed (%v)", err) - } -} - -func TestUnmarshalJSONPosOffset(t *testing.T) { - var d struct { - Time Time `json:"datetime"` - } - j := `{"datetime" : "1980-01-02T00:11:35.01+01:00"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - t.Fatalf("date: Time#Unmarshal failed (%v)", err) - } -} - -func TestUnmarshalJSONNegOffset(t *testing.T) { - var d struct { - Time Time `json:"datetime"` - } - j := `{"datetime" : "1492-10-12T10:15:01.789-08:00"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - t.Fatalf("date: Time#Unmarshal failed (%v)", err) - } -} - -func TestUnmarshalTextNoOffset(t *testing.T) { - d := Time{} - t1 := "2001-02-03T04:05:06" - - if err := d.UnmarshalText([]byte(t1)); err != nil { - t.Fatalf("date: Time#UnmarshalText failed (%v)", err) - } -} - -func TestUnmarshalTextPosOffset(t *testing.T) { - d := Time{} - t1 := "2001-02-03T04:05:06+00:30" - - if err := d.UnmarshalText([]byte(t1)); err != nil { - t.Fatalf("date: Time#UnmarshalText failed (%v)", err) - } -} - -func TestUnmarshalTextNegOffset(t *testing.T) { - d := Time{} - t1 := "2001-02-03T04:05:06-11:00" - - if err := d.UnmarshalText([]byte(t1)); err != nil { - t.Fatalf("date: Time#UnmarshalText failed (%v)", err) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go deleted file mode 100644 index dfd640513d..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleTimeRFC1123() { - d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2006-01-02 15:04:05 +0000 MST -} - -func ExampleTimeRFC1123_MarshalBinary() { - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - fmt.Println(err) - } - d := TimeRFC1123{ti} - b, err := d.MarshalBinary() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(b)) - // Output: Mon, 02 Jan 2006 15:04:05 MST -} - -func ExampleTimeRFC1123_UnmarshalBinary() { - d := TimeRFC1123{} - t := "Mon, 02 Jan 2006 15:04:05 MST" - if err := d.UnmarshalBinary([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: Mon, 02 Jan 2006 15:04:05 MST -} - -func ExampleTimeRFC1123_MarshalJSON() { - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - fmt.Println(err) - } - d := TimeRFC1123{ti} - j, err := json.Marshal(d) - if err != nil { - fmt.Println(err) - } - fmt.Println(string(j)) - // Output: "Mon, 02 Jan 2006 15:04:05 MST" -} - -func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) { - ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC) - d := TimeRFC1123{ti} - if _, err := json.Marshal(d); err == nil { - t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date") - } -} - -func ExampleTimeRFC1123_UnmarshalJSON() { - var d struct { - Time TimeRFC1123 `json:"datetime"` - } - j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}` - - if err := json.Unmarshal([]byte(j), &d); err != nil { - fmt.Println(err) - } - fmt.Println(d.Time) - // Output: Mon, 02 Jan 2006 15:04:05 MST -} - -func ExampleTimeRFC1123_MarshalText() { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - fmt.Println(err) - } - d := TimeRFC1123{ti} - t, err := d.MarshalText() - if err != nil { - fmt.Println(err) - } - fmt.Println(string(t)) - // Output: Sat, 03 Feb 2001 04:05:06 UTC -} - -func ExampleTimeRFC1123_UnmarshalText() { - d := TimeRFC1123{} - t := "Sat, 03 Feb 2001 04:05:06 UTC" - - if err := d.UnmarshalText([]byte(t)); err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: Sat, 03 Feb 2001 04:05:06 UTC -} - -func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) { - dt := `"Mon, 02 Jan 2000000 15:05 MST"` - d := TimeRFC1123{} - if err := d.UnmarshalJSON([]byte(dt)); err == nil { - t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") - } -} - -func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) { - dt := "Mon, 02 Jan 2000000 15:05 MST" - d := TimeRFC1123{} - if err := d.UnmarshalText([]byte(dt)); err == nil { - t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") - } -} - -func TestTimeStringRfc1123(t *testing.T) { - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - fmt.Println(err) - } - d := TimeRFC1123{ti} - if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" { - t.Fatalf("date: TimeRFC1123#String failed (%v)", d.String()) - } -} - -func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) { - d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} - if d.String() != "" { - t.Fatalf("date: TimeRFC1123#String failed empty string for an error") - } -} - -func TestTimeBinaryRoundTripRfc1123(t *testing.T) { - ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") - if err != nil { - t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) - } - d1 := TimeRFC1123{ti} - t1, err := 
d1.MarshalBinary() - if err != nil { - t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err) - } - - d2 := TimeRFC1123{} - if err = d2.UnmarshalBinary(t1); err != nil { - t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) - } -} - -func TestTimeJSONRoundTripRfc1123(t *testing.T) { - type s struct { - Time TimeRFC1123 `json:"datetime"` - } - var err error - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) - } - d1 := s{Time: TimeRFC1123{ti}} - j, err := json.Marshal(d1) - if err != nil { - t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err) - } - - d2 := s{} - if err = json.Unmarshal(j, &d2); err != nil { - t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) - } -} - -func TestTimeTextRoundTripRfc1123(t *testing.T) { - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - if err != nil { - t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) - } - d1 := TimeRFC1123{Time: ti} - t1, err := d1.MarshalText() - if err != nil { - t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err) - } - - d2 := TimeRFC1123{} - if err = d2.UnmarshalText(t1); err != nil { - t.Fatalf("date: TimeRFC1123#UnmarshalText failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) - } -} - -func TestTimeToTimeRFC1123(t *testing.T) { - ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") - d := TimeRFC1123{ti} - if err != nil { - t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) - } - var _ time.Time = d.ToTime() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go deleted file mode 100644 index 3fa347c00c..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go +++ /dev/null @@ -1,283 +0,0 @@ -// +build go1.7 - -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "math" - "testing" - "time" -) - -func ExampleUnixTime_MarshalJSON() { - epoch := UnixTime(UnixEpoch()) - text, _ := json.Marshal(epoch) - fmt.Print(string(text)) - // Output: 0 -} - -func ExampleUnixTime_UnmarshalJSON() { - var myTime UnixTime - json.Unmarshal([]byte("1.3e2"), &myTime) - fmt.Printf("%v", time.Time(myTime)) - // Output: 1970-01-01 00:02:10 +0000 UTC -} - -func TestUnixTime_MarshalJSON(t *testing.T) { - testCases := []time.Time{ - UnixEpoch().Add(-1 * time.Second), // One second befote the Unix Epoch - time.Date(2017, time.April, 14, 20, 27, 47, 0, time.UTC), // The time this test was written - UnixEpoch(), - time.Date(1800, 01, 01, 0, 0, 0, 0, time.UTC), - time.Date(2200, 12, 29, 00, 01, 37, 82, time.UTC), - } - - for _, tc := range testCases { - t.Run(tc.String(), func(subT *testing.T) { - var actual, expected float64 - var marshaled []byte - - target := UnixTime(tc) - expected = float64(target.Duration().Nanoseconds()) / 1e9 - - if temp, err := json.Marshal(target); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - dec := json.NewDecoder(bytes.NewReader(marshaled)) - if err := dec.Decode(&actual); err != nil { - subT.Error(err) - return - } - - diff := math.Abs(actual - expected) - subT.Logf("\ngot :\t%g\nwant:\t%g\ndiff:\t%g", actual, expected, diff) - if diff > 1e-9 { //Must be within 1 nanosecond of one another - subT.Fail() - } - }) - } -} - -func TestUnixTime_UnmarshalJSON(t *testing.T) { - testCases := []struct { - text string - expected time.Time - }{ - {"1", UnixEpoch().Add(time.Second)}, - {"0", UnixEpoch()}, - {"1492203742", time.Date(2017, time.April, 14, 21, 02, 22, 0, time.UTC)}, // The time this test was written - {"-1", time.Date(1969, time.December, 31, 23, 59, 59, 0, time.UTC)}, - {"1.5", UnixEpoch().Add(1500 * time.Millisecond)}, - {"0e1", UnixEpoch()}, // See http://json.org for 'number' format definition. - {"1.3e+2", UnixEpoch().Add(130 * time.Second)}, - {"1.6E-10", UnixEpoch()}, // This is so small, it should get truncated into the UnixEpoch - {"2E-6", UnixEpoch().Add(2 * time.Microsecond)}, - {"1.289345e9", UnixEpoch().Add(1289345000 * time.Second)}, - {"1e-9", UnixEpoch().Add(time.Nanosecond)}, - } - - for _, tc := range testCases { - t.Run(tc.text, func(subT *testing.T) { - var rehydrated UnixTime - if err := json.Unmarshal([]byte(tc.text), &rehydrated); err != nil { - subT.Error(err) - return - } - - if time.Time(rehydrated) != tc.expected { - subT.Logf("\ngot: \t%v\nwant:\t%v\ndiff:\t%v", time.Time(rehydrated), tc.expected, time.Time(rehydrated).Sub(tc.expected)) - subT.Fail() - } - }) - } -} - -func TestUnixTime_JSONRoundTrip(t *testing.T) { - testCases := []time.Time{ - UnixEpoch(), - time.Date(2005, time.November, 5, 0, 0, 0, 0, time.UTC), // The day V for Vendetta (film) was released. 
- UnixEpoch().Add(-6 * time.Second), - UnixEpoch().Add(800 * time.Hour), - UnixEpoch().Add(time.Nanosecond), - time.Date(2015, time.September, 05, 4, 30, 12, 9992, time.UTC), - } - - for _, tc := range testCases { - t.Run(tc.String(), func(subT *testing.T) { - subject := UnixTime(tc) - var marshaled []byte - if temp, err := json.Marshal(subject); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - var unmarshaled UnixTime - if err := json.Unmarshal(marshaled, &unmarshaled); err != nil { - subT.Error(err) - } - - actual := time.Time(unmarshaled) - diff := actual.Sub(tc) - subT.Logf("\ngot :\t%s\nwant:\t%s\ndiff:\t%s", actual.String(), tc.String(), diff.String()) - - if diff > time.Duration(100) { // We lose some precision be working in floats. We shouldn't lose more than 100 nanoseconds. - subT.Fail() - } - }) - } -} - -func TestUnixTime_MarshalBinary(t *testing.T) { - testCases := []struct { - expected int64 - subject time.Time - }{ - {0, UnixEpoch()}, - {-15 * int64(time.Second), UnixEpoch().Add(-15 * time.Second)}, - {54, UnixEpoch().Add(54 * time.Nanosecond)}, - } - - for _, tc := range testCases { - t.Run("", func(subT *testing.T) { - var marshaled []byte - - if temp, err := UnixTime(tc.subject).MarshalBinary(); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - var unmarshaled int64 - if err := binary.Read(bytes.NewReader(marshaled), binary.LittleEndian, &unmarshaled); err != nil { - subT.Error(err) - return - } - - if unmarshaled != tc.expected { - subT.Logf("\ngot: \t%d\nwant:\t%d", unmarshaled, tc.expected) - subT.Fail() - } - }) - } -} - -func TestUnixTime_BinaryRoundTrip(t *testing.T) { - testCases := []time.Time{ - UnixEpoch(), - UnixEpoch().Add(800 * time.Minute), - UnixEpoch().Add(7 * time.Hour), - UnixEpoch().Add(-1 * time.Nanosecond), - } - - for _, tc := range testCases { - t.Run(tc.String(), func(subT *testing.T) { - original := UnixTime(tc) - var marshaled []byte - - if temp, err := original.MarshalBinary(); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - var traveled UnixTime - if err := traveled.UnmarshalBinary(marshaled); err != nil { - subT.Error(err) - return - } - - if traveled != original { - subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(original).String(), time.Time(traveled).String()) - subT.Fail() - } - }) - } -} - -func TestUnixTime_MarshalText(t *testing.T) { - testCases := []time.Time{ - UnixEpoch(), - UnixEpoch().Add(45 * time.Second), - UnixEpoch().Add(time.Nanosecond), - UnixEpoch().Add(-100000 * time.Second), - } - - for _, tc := range testCases { - expected, _ := tc.MarshalText() - t.Run("", func(subT *testing.T) { - var marshaled []byte - - if temp, err := UnixTime(tc).MarshalText(); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - if string(marshaled) != string(expected) { - subT.Logf("\ngot: \t%s\nwant:\t%s", string(marshaled), string(expected)) - subT.Fail() - } - }) - } -} - -func TestUnixTime_TextRoundTrip(t *testing.T) { - testCases := []time.Time{ - UnixEpoch(), - UnixEpoch().Add(-1 * time.Nanosecond), - UnixEpoch().Add(1 * time.Nanosecond), - time.Date(2017, time.April, 17, 21, 00, 00, 00, time.UTC), - } - - for _, tc := range testCases { - t.Run(tc.String(), func(subT *testing.T) { - unixTC := UnixTime(tc) - - var marshaled []byte - - if temp, err := unixTC.MarshalText(); err == nil { - marshaled = temp - } else { - subT.Error(err) - return - } - - var unmarshaled UnixTime - if err := unmarshaled.UnmarshalText(marshaled); err != nil { 
- subT.Error(err) - return - } - - if unmarshaled != unixTC { - t.Logf("\ngot: \t%s\nwant:\t%s", time.Time(unmarshaled).String(), tc.String()) - t.Fail() - } - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error_test.go b/vendor/github.com/Azure/go-autorest/autorest/error_test.go deleted file mode 100644 index f4f5d75ef5..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "reflect" - "regexp" - "testing" -) - -func TestNewErrorWithError_AssignsPackageType(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") - - if e.PackageType != "packageType" { - t.Fatalf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType) - } -} - -func TestNewErrorWithError_AssignsMethod(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") - - if e.Method != "method" { - t.Fatalf("autorest: Error failed to set method -- expected %v, received %v", "method", e.Method) - } -} - -func TestNewErrorWithError_AssignsMessage(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") - - if e.Message != "message" { - t.Fatalf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message) - } -} - -func TestNewErrorWithError_AssignsUndefinedStatusCodeIfRespNil(t *testing.T) { - e := NewErrorWithError(nil, "packageType", "method", nil, "message") - if e.StatusCode != UndefinedStatusCode { - t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) - } -} - -func TestNewErrorWithError_AssignsStatusCode(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ - StatusCode: http.StatusBadRequest, - Status: http.StatusText(http.StatusBadRequest)}, "message") - - if e.StatusCode != http.StatusBadRequest { - t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) - } -} - -func TestNewErrorWithError_AcceptsArgs(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message %s", "arg") - - if matched, _ := regexp.MatchString(`.*arg.*`, e.Message); !matched { - t.Fatalf("autorest: Error failed to apply message arguments -- expected %v, received %v", - `.*arg.*`, e.Message) - } -} - -func TestNewErrorWithError_AssignsError(t *testing.T) { - err := fmt.Errorf("original") - e := NewErrorWithError(err, "packageType", "method", nil, "message") - - if e.Original != err { - t.Fatalf("autorest: Error failed to set error -- expected %v, received %v", err, e.Original) - } -} - -func TestNewErrorWithResponse_ContainsStatusCode(t *testing.T) { - e := 
NewErrorWithResponse("packageType", "method", &http.Response{
-		StatusCode: http.StatusBadRequest,
-		Status:     http.StatusText(http.StatusBadRequest)}, "message")
-
-	if e.StatusCode != http.StatusBadRequest {
-		t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode)
-	}
-}
-
-func TestNewErrorWithResponse_nilResponse_ReportsUndefinedStatusCode(t *testing.T) {
-	e := NewErrorWithResponse("packageType", "method", nil, "message")
-
-	if e.StatusCode != UndefinedStatusCode {
-		t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode)
-	}
-}
-
-func TestNewErrorWithResponse_Forwards(t *testing.T) {
-	e1 := NewError("packageType", "method", "message %s", "arg")
-	e2 := NewErrorWithResponse("packageType", "method", nil, "message %s", "arg")
-
-	if !reflect.DeepEqual(e1, e2) {
-		t.Fatal("autorest: NewError did not return an error equivalent to NewErrorWithError")
-	}
-}
-
-func TestNewErrorWithError_Forwards(t *testing.T) {
-	e1 := NewError("packageType", "method", "message %s", "arg")
-	e2 := NewErrorWithError(nil, "packageType", "method", nil, "message %s", "arg")
-
-	if !reflect.DeepEqual(e1, e2) {
-		t.Fatal("autorest: NewError did not return an error equivalent to NewErrorWithError")
-	}
-}
-
-func TestNewErrorWithError_DoesNotWrapADetailedError(t *testing.T) {
-	e1 := NewError("packageType1", "method1", "message1 %s", "arg1")
-	e2 := NewErrorWithError(e1, "packageType2", "method2", nil, "message2 %s", "arg2")
-
-	if !reflect.DeepEqual(e1, e2) {
-		t.Fatalf("autorest: NewErrorWithError incorrectly wrapped a DetailedError -- expected %v, received %v", e1, e2)
-	}
-}
-
-func TestNewErrorWithError_WrapsAnError(t *testing.T) {
-	e1 := fmt.Errorf("Inner Error")
-	var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message")
-
-	if _, ok := e2.(DetailedError); !ok {
-		t.Fatalf("autorest: NewErrorWithError failed to wrap a standard error -- received %T", e2)
-	}
-}
-
-func TestDetailedError(t *testing.T) {
-	err := fmt.Errorf("original")
-	e := NewErrorWithError(err, "packageType", "method", nil, "message")
-
-	if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#Error failed to return original error message -- expected %v, received %v",
-			`.*original.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorConstainsPackageType(t *testing.T) {
-	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message")
-
-	if matched, _ := regexp.MatchString(`.*packageType.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#String failed to include PackageType -- expected %v, received %v",
-			`.*packageType.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorConstainsMethod(t *testing.T) {
-	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message")
-
-	if matched, _ := regexp.MatchString(`.*method.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#String failed to include Method -- expected %v, received %v",
-			`.*method.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorConstainsMessage(t *testing.T) {
-	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message")
-
-	if matched, _ := regexp.MatchString(`.*message.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#String failed to include Message -- expected %v, received %v",
-			`.*message.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorConstainsStatusCode(t *testing.T) {
-	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{
-		StatusCode: http.StatusBadRequest,
-		Status:     http.StatusText(http.StatusBadRequest)}, "message")
-
-	if matched, _ := regexp.MatchString(`.*400.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#String failed to include Status Code -- expected %v, received %v",
-			`.*400.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorConstainsOriginal(t *testing.T) {
-	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message")
-
-	if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched {
-		t.Fatalf("autorest: Error#String failed to include Original error -- expected %v, received %v",
-			`.*original.*`, e.Error())
-	}
-}
-
-func TestDetailedErrorSkipsOriginal(t *testing.T) {
-	e := NewError("packageType", "method", "message")
-
-	if matched, _ := regexp.MatchString(`.*Original.*`, e.Error()); matched {
-		t.Fatalf("autorest: Error#String included missing Original error -- unexpected %v, received %v",
-			`.*Original.*`, e.Error())
-	}
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
index 2290c40100..6d67bd7337 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -27,8 +27,9 @@ import (
 )
 
 const (
-	mimeTypeJSON     = "application/json"
-	mimeTypeFormPost = "application/x-www-form-urlencoded"
+	mimeTypeJSON        = "application/json"
+	mimeTypeOctetStream = "application/octet-stream"
+	mimeTypeFormPost    = "application/x-www-form-urlencoded"
 
 	headerAuthorization = "Authorization"
 	headerContentType   = "Content-Type"
@@ -112,6 +113,28 @@ func WithHeader(header string, value string) PrepareDecorator {
 	}
 }
 
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+	h := ensureValueStrings(headers)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+
+				for name, value := range h {
+					r.Header.Set(http.CanonicalHeaderKey(name), value)
+				}
+			}
+			return r, err
+		})
+	}
+}
+
 // WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
 // value is "Bearer " followed by the supplied token.
 func WithBearerAuthorization(token string) PrepareDecorator {
@@ -142,6 +165,11 @@ func AsJSON() PrepareDecorator {
 	return AsContentType(mimeTypeJSON)
 }
 
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+	return AsContentType(mimeTypeOctetStream)
+}
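+
+// Editor's illustration (not part of the vendored change): the decorators added
+// in this revision compose like any other PrepareDecorator from caller code,
+// for example (the header name below is hypothetical):
+//
+//	req, err := autorest.Prepare(&http.Request{},
+//		autorest.AsOctetStream(),
+//		autorest.WithHeaders(map[string]interface{}{"x-sample": "value"}))
+//
+// AsOctetStream sets the Content-Type to application/octet-stream, and
+// WithHeaders canonicalizes each header name before setting it.
+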
 // WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
 // decorator does not validate that the passed method string is a known HTTP method.
 func WithMethod(method string) PrepareDecorator {
@@ -215,6 +243,11 @@ func WithFormData(v url.Values) PrepareDecorator {
 			r, err := p.Prepare(r)
 			if err == nil {
 				s := v.Encode()
+
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+
 				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
 				r.ContentLength = int64(len(s))
 				r.Body = ioutil.NopCloser(strings.NewReader(s))
 			}
@@ -430,11 +463,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
 			if r.URL == nil {
 				return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
 			}
+
 			v := r.URL.Query()
 			for key, value := range parameters {
-				v.Add(key, value)
+				d, err := url.QueryUnescape(value)
+				if err != nil {
+					return r, err
+				}
+				v.Add(key, d)
 			}
-			r.URL.RawQuery = createQuery(v)
+			r.URL.RawQuery = v.Encode()
 		}
 		return r, err
 	})
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go
deleted file mode 100644
index 9ba608a8fa..0000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go
+++ /dev/null
@@ -1,766 +0,0 @@
-package autorest
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"testing"
-
-	"github.com/Azure/go-autorest/autorest/mocks"
-)
-
-// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed
-// Preparer and decorates the response.
-func ExamplePrepareDecorator() {
-	path := "a/b/c/"
-	pd := func() PrepareDecorator {
-		return func(p Preparer) Preparer {
-			return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-				r, err := p.Prepare(r)
-				if err == nil {
-					if r.URL == nil {
-						return r, fmt.Errorf("ERROR: URL is not set")
-					}
-					r.URL.Path += path
-				}
-				return r, err
-			})
-		}
-	}
-
-	r, _ := Prepare(&http.Request{},
-		WithBaseURL("https://microsoft.com/"),
-		pd())
-
-	fmt.Printf("Path is %s\n", r.URL)
-	// Output: Path is https://microsoft.com/a/b/c/
-}
-
-// PrepareDecorators may also modify and then invoke the Preparer.
-func ExamplePrepareDecorator_pre() {
-	pd := func() PrepareDecorator {
-		return func(p Preparer) Preparer {
-			return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-				r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json")
-				return p.Prepare(r)
-			})
-		}
-	}
-
-	r, _ := Prepare(&http.Request{Header: http.Header{}},
-		pd())
-
-	fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType"))
-	// Output: ContentType is application/json
-}
-
-// Create a sequence of three Preparers that build up the URL path.
-func ExampleCreatePreparer() { - p := CreatePreparer( - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - r, err := p.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c -} - -// Create and apply separate Preparers -func ExampleCreatePreparer_multiple() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - - p1 := CreatePreparer(WithBaseURL("https://microsoft.com/")) - p2 := CreatePreparer(WithPathParameters("/{param1}/b/{param2}/", params)) - - r, err := p1.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - r, err = p2.Prepare(r) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create and chain separate Preparers -func ExampleCreatePreparer_chain() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - - p := CreatePreparer(WithBaseURL("https://microsoft.com/")) - p = DecoratePreparer(p, WithPathParameters("/{param1}/b/{param2}/", params)) - - r, err := p.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create and prepare an http.Request in one call -func ExamplePrepare() { - r, err := Prepare(&http.Request{}, - AsGet(), - WithBaseURL("https://microsoft.com/"), - WithPath("a/b/c/")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("%s %s", r.Method, r.URL) - } - // Output: GET https://microsoft.com/a/b/c/ -} - -// Create a request for a supplied base URL and path -func ExampleWithBaseURL() { - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/a/b/c/")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -func ExampleWithBaseURL_second() { - _, err := Prepare(&http.Request{}, WithBaseURL(":")) - fmt.Println(err) - // Output: parse :: missing protocol scheme -} - -func ExampleWithCustomBaseURL() { - r, err := Prepare(&http.Request{}, - WithCustomBaseURL("https://{account}.{service}.core.windows.net/", - map[string]interface{}{ - "account": "myaccount", - "service": "blob", - })) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://myaccount.blob.core.windows.net/ -} - -func ExampleWithCustomBaseURL_second() { - _, err := Prepare(&http.Request{}, - WithCustomBaseURL(":", map[string]interface{}{})) - fmt.Println(err) - // Output: parse :: missing protocol scheme -} - -// Create a request with a custom HTTP header -func ExampleWithHeader() { - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/a/b/c/"), - WithHeader("x-foo", "bar")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo")) - } - // Output: Header x-foo=bar -} - -// Create a request whose Body is the JSON encoding of a structure -func ExampleWithFormData() { - v := url.Values{} - v.Add("name", "Rob Pike") - v.Add("age", "42") - - r, err := Prepare(&http.Request{}, - WithFormData(v)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Request Body contains %s\n", string(b)) - } - // Output: Request Body contains 
age=42&name=Rob+Pike -} - -// Create a request whose Body is the JSON encoding of a structure -func ExampleWithJSON() { - t := mocks.T{Name: "Rob Pike", Age: 42} - - r, err := Prepare(&http.Request{}, - WithJSON(&t)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Request Body contains %s\n", string(b)) - } - // Output: Request Body contains {"name":"Rob Pike","age":42} -} - -// Create a request from a path with escaped parameters -func ExampleWithEscapedPathParameters() { - params := map[string]interface{}{ - "param1": "a b c", - "param2": "d e f", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithEscapedPathParameters("/{param1}/b/{param2}/", params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a+b+c/b/d+e+f/ -} - -// Create a request from a path with parameters -func ExampleWithPathParameters() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPathParameters("/{param1}/b/{param2}/", params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create a request with query parameters -func ExampleWithQueryParameters() { - params := map[string]interface{}{ - "q1": "value1", - "q2": "value2", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("/a/b/c/"), - WithQueryParameters(params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 -} - -func TestWithCustomBaseURL(t *testing.T) { - r, err := Prepare(&http.Request{}, WithCustomBaseURL("https://{account}.{service}.core.windows.net/", - map[string]interface{}{ - "account": "myaccount", - "service": "blob", - })) - if err != nil { - t.Fatalf("autorest: WithCustomBaseURL should not fail") - } - if r.URL.String() != "https://myaccount.blob.core.windows.net/" { - t.Fatalf("autorest: WithCustomBaseURL expected https://myaccount.blob.core.windows.net/, got %s", r.URL) - } -} - -func TestWithCustomBaseURLwithInvalidURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithCustomBaseURL("hello/{account}.{service}.core.windows.net/", - map[string]interface{}{ - "account": "myaccount", - "service": "blob", - })) - if err == nil { - t.Fatalf("autorest: WithCustomBaseURL should fail fo URL parse error") - } -} - -func TestWithPathWithInvalidPath(t *testing.T) { - p := "path%2*end" - if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil { - t.Fatalf("autorest: WithPath should fail for invalid URL escape error for path '%v' ", p) - } - -} - -func TestWithPathParametersWithInvalidPath(t *testing.T) { - p := "path%2*end" - m := map[string]interface{}{ - "path1": p, - } - if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPathParameters("/{path1}/", m)); err == nil { - t.Fatalf("autorest: WithPath should fail for invalid URL escape for path '%v' ", p) - } - -} - -func TestCreatePreparerDoesNotModify(t *testing.T) { - r1 := &http.Request{} - p := CreatePreparer() - r2, err := p.Prepare(r1) - if err != nil { - t.Fatalf("autorest: CreatePreparer failed (%v)", err) - } - if !reflect.DeepEqual(r1, r2) { - 
t.Fatalf("autorest: CreatePreparer without decorators modified the request") - } -} - -func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { - p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) - r, err := p.Prepare(&http.Request{}) - if err != nil { - t.Fatalf("autorest: CreatePreparer failed (%v)", err) - } - if r.URL.String() != "https:/1/2/3" && r.URL.Host != "microsoft.com" { - t.Fatalf("autorest: CreatePreparer failed to run decorators in order") - } -} - -func TestAsContentType(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != "application/text" { - t.Fatalf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestAsFormURLEncoded(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != mimeTypeFormPost { - t.Fatalf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestAsJSON(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsJSON()) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != mimeTypeJSON { - t.Fatalf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestWithNothing(t *testing.T) { - r1 := mocks.NewRequest() - r2, err := Prepare(r1, WithNothing()) - if err != nil { - t.Fatalf("autorest: WithNothing returned an unexpected error (%v)", err) - } - - if !reflect.DeepEqual(r1, r2) { - t.Fatal("azure: WithNothing modified the passed HTTP Request") - } -} - -func TestWithBearerAuthorization(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { - t.Fatalf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) - } -} - -func TestWithUserAgent(t *testing.T) { - ua := "User Agent Go" - r, err := Prepare(mocks.NewRequest(), WithUserAgent(ua)) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.UserAgent() != ua || r.Header.Get(headerUserAgent) != ua { - t.Fatalf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) - } -} - -func TestWithMethod(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) - if r.Method != "HEAD" { - t.Fatal("autorest: WithMethod failed to set HTTP method header") - } -} - -func TestAsDelete(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsDelete()) - if r.Method != "DELETE" { - t.Fatal("autorest: AsDelete failed to set HTTP method header to DELETE") - } -} - -func TestAsGet(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsGet()) - if r.Method != "GET" { - t.Fatal("autorest: AsGet failed to set HTTP method header to GET") - } -} - -func TestAsHead(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsHead()) - if r.Method != "HEAD" { - t.Fatal("autorest: AsHead failed to set HTTP method header to HEAD") - } -} - -func TestAsOptions(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsOptions()) - if r.Method != "OPTIONS" { - t.Fatal("autorest: AsOptions failed to set HTTP method header to 
OPTIONS") - } -} - -func TestAsPatch(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPatch()) - if r.Method != "PATCH" { - t.Fatal("autorest: AsPatch failed to set HTTP method header to PATCH") - } -} - -func TestAsPost(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPost()) - if r.Method != "POST" { - t.Fatal("autorest: AsPost failed to set HTTP method header to POST") - } -} - -func TestAsPut(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPut()) - if r.Method != "PUT" { - t.Fatal("autorest: AsPut failed to set HTTP method header to PUT") - } -} - -func TestPrepareWithNullRequest(t *testing.T) { - _, err := Prepare(nil) - if err == nil { - t.Fatal("autorest: Prepare failed to return an error when given a null http.Request") - } -} - -func TestWithFormDataSetsContentLength(t *testing.T) { - v := url.Values{} - v.Add("name", "Rob Pike") - v.Add("age", "42") - - r, err := Prepare(&http.Request{}, - WithFormData(v)) - if err != nil { - t.Fatalf("autorest: WithFormData failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithFormData failed with error (%v)", err) - } - - expected := "name=Rob+Pike&age=42" - if !(string(b) == "name=Rob+Pike&age=42" || string(b) == "age=42&name=Rob+Pike") { - t.Fatalf("autorest:WithFormData failed to return correct string got (%v), expected (%v)", string(b), expected) - } - - if r.ContentLength != int64(len(b)) { - t.Fatalf("autorest:WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithMultiPartFormDataSetsContentLength(t *testing.T) { - v := map[string]interface{}{ - "file": ioutil.NopCloser(strings.NewReader("Hello Gopher")), - "age": "42", - } - - r, err := Prepare(&http.Request{}, - WithMultiPartFormData(v)) - if err != nil { - t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) - } - - if r.ContentLength != int64(len(b)) { - t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithMultiPartFormDataWithNoFile(t *testing.T) { - v := map[string]interface{}{ - "file": "no file", - "age": "42", - } - - r, err := Prepare(&http.Request{}, - WithMultiPartFormData(v)) - if err != nil { - t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) - } - - if r.ContentLength != int64(len(b)) { - t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithFile(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithFile(ioutil.NopCloser(strings.NewReader("Hello Gopher")))) - if err != nil { - t.Fatalf("autorest: WithFile failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithFile failed with error (%v)", err) - } - if r.ContentLength != int64(len(b)) { - t.Fatalf("autorest:WithFile set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithBool_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithBool(false)) - if err != nil { - t.Fatalf("autorest: WithBool failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithBool failed with error (%v)", 
err) - } - - if r.ContentLength != int64(len(fmt.Sprintf("%v", false))) { - t.Fatalf("autorest: WithBool set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", false)))) - } - - v, err := strconv.ParseBool(string(s)) - if err != nil || v { - t.Fatalf("autorest: WithBool incorrectly encoded the boolean as %v", s) - } -} - -func TestWithFloat32_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithFloat32(42.0)) - if err != nil { - t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) - } - - if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { - t.Fatalf("autorest: WithFloat32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) - } - - v, err := strconv.ParseFloat(string(s), 32) - if err != nil || float32(v) != float32(42.0) { - t.Fatalf("autorest: WithFloat32 incorrectly encoded the boolean as %v", s) - } -} - -func TestWithFloat64_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithFloat64(42.0)) - if err != nil { - t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) - } - - if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { - t.Fatalf("autorest: WithFloat64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) - } - - v, err := strconv.ParseFloat(string(s), 64) - if err != nil || v != float64(42.0) { - t.Fatalf("autorest: WithFloat64 incorrectly encoded the boolean as %v", s) - } -} - -func TestWithInt32_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithInt32(42)) - if err != nil { - t.Fatalf("autorest: WithInt32 failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithInt32 failed with error (%v)", err) - } - - if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { - t.Fatalf("autorest: WithInt32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) - } - - v, err := strconv.ParseInt(string(s), 10, 32) - if err != nil || int32(v) != int32(42) { - t.Fatalf("autorest: WithInt32 incorrectly encoded the boolean as %v", s) - } -} - -func TestWithInt64_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithInt64(42)) - if err != nil { - t.Fatalf("autorest: WithInt64 failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithInt64 failed with error (%v)", err) - } - - if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { - t.Fatalf("autorest: WithInt64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) - } - - v, err := strconv.ParseInt(string(s), 10, 64) - if err != nil || v != int64(42) { - t.Fatalf("autorest: WithInt64 incorrectly encoded the boolean as %v", s) - } -} - -func TestWithString_SetsTheBody(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithString("value")) - if err != nil { - t.Fatalf("autorest: WithString failed with error (%v)", err) - } - - s, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithString failed with error (%v)", err) - } - - if r.ContentLength != int64(len("value")) { - t.Fatalf("autorest: WithString set Content-Length to %v, expected %v", r.ContentLength, int64(len("value"))) - 
} - - if string(s) != "value" { - t.Fatalf("autorest: WithString incorrectly encoded the string as %v", s) - } -} - -func TestWithJSONSetsContentLength(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithJSON(&mocks.T{Name: "Rob Pike", Age: 42})) - if err != nil { - t.Fatalf("autorest: WithJSON failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: WithJSON failed with error (%v)", err) - } - - if r.ContentLength != int64(len(b)) { - t.Fatalf("autorest:WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithHeaderAllocatesHeaders(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar")) - if err != nil { - t.Fatalf("autorest: WithHeader failed (%v)", err) - } - if r.Header.Get("x-foo") != "bar" { - t.Fatalf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo")) - } -} - -func TestWithPathCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithPath("a")) - if err == nil { - t.Fatalf("autorest: WithPath failed to catch a nil URL") - } -} - -func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithEscapedPathParameters("", map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Fatalf("autorest: WithEscapedPathParameters failed to catch a nil URL") - } -} - -func TestWithPathParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithPathParameters("", map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Fatalf("autorest: WithPathParameters failed to catch a nil URL") - } -} - -func TestWithQueryParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Fatalf("autorest: WithQueryParameters failed to catch a nil URL") - } -} - -func TestModifyingExistingRequest(t *testing.T) { - r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"})) - if err != nil { - t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err) - } - if r.URL.String() != "https:/search?q=golang" && r.URL.Host != "bing.com" { - t.Fatalf("autorest: Preparing an existing request failed (%s)", r.URL) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go deleted file mode 100644 index 4a57b1e3b1..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go +++ /dev/null @@ -1,665 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
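
Reviewer note, not part of the vendored diff: the preparer.go changes above add WithHeaders and AsOctetStream, and rework WithQueryParameters to unescape each value before re-encoding the whole query via url.Values.Encode, so pre-escaped input is no longer double-encoded. A minimal caller sketch follows; the URL, header name, and query value are hypothetical, and it only assumes the decorator signatures shown in the hunks above.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Illustrative only; decorators run in the order listed.
	r, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		// New helper vendored above: sets Content-Type to application/octet-stream.
		autorest.AsOctetStream(),
		autorest.WithBaseURL("https://example.com/"),
		// WithHeaders canonicalizes each header name before setting it.
		autorest.WithHeaders(map[string]interface{}{"x-request-id": "abc123"}),
		// Each value is unescaped, then the whole query is re-encoded, so the
		// pre-escaped "a%20b" yields q=a+b rather than the double-encoded q=a%2520b.
		autorest.WithQueryParameters(map[string]interface{}{"q": "a%20b"}))
	if err != nil {
		fmt.Println("ERROR:", err)
		return
	}
	fmt.Println(r.Method, r.URL) // expected: GET https://example.com/?q=a+b
}
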
- -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -func ExampleWithErrorUnlessOK() { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - - // Respond and leave the response body open (for a subsequent responder to close) - err := Respond(r, - WithErrorUnlessOK(), - ByDiscardingBody(), - ByClosingIfError()) - - if err == nil { - fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL) - - // Complete handling the response and close the body - Respond(r, - ByDiscardingBody(), - ByClosing()) - } - // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 -} - -func ExampleByUnmarshallingJSON() { - c := ` - { - "name" : "Rob Pike", - "age" : 42 - } - ` - - type V struct { - Name string `json:"name"` - Age int `json:"age"` - } - - v := &V{} - - Respond(mocks.NewResponseWithContent(c), - ByUnmarshallingJSON(v), - ByClosing()) - - fmt.Printf("%s is %d years old\n", v.Name, v.Age) - // Output: Rob Pike is 42 years old -} - -func ExampleByUnmarshallingXML() { - c := ` - - Rob Pike - 42 - ` - - type V struct { - Name string `xml:"Name"` - Age int `xml:"Age"` - } - - v := &V{} - - Respond(mocks.NewResponseWithContent(c), - ByUnmarshallingXML(v), - ByClosing()) - - fmt.Printf("%s is %d years old\n", v.Name, v.Age) - // Output: Rob Pike is 42 years old -} - -func TestCreateResponderDoesNotModify(t *testing.T) { - r1 := mocks.NewResponse() - r2 := mocks.NewResponse() - p := CreateResponder() - err := p.Respond(r1) - if err != nil { - t.Fatalf("autorest: CreateResponder failed (%v)", err) - } - if !reflect.DeepEqual(r1, r2) { - t.Fatalf("autorest: CreateResponder without decorators modified the response") - } -} - -func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) { - s := "" - - d := func(n int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - s += fmt.Sprintf("%d", n) - } - return err - }) - } - } - - p := CreateResponder(d(1), d(2), d(3)) - err := p.Respond(&http.Response{}) - if err != nil { - t.Fatalf("autorest: Respond failed (%v)", err) - } - - if s != "123" { - t.Fatalf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s) - } -} - -func TestByIgnoring(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(r2 *http.Response) error { - r1 := mocks.NewResponse() - if !reflect.DeepEqual(r1, r2) { - t.Fatalf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1) - } - return nil - }) - } - })(), - ByIgnoring(), - ByClosing()) -} - -func TestByCopying_Copies(t *testing.T) { - r := mocks.NewResponseWithContent(jsonT) - b := &bytes.Buffer{} - - err := Respond(r, - ByCopying(b), - ByUnmarshallingJSON(&mocks.T{}), - ByClosing()) - if err != nil { - t.Fatalf("autorest: ByCopying returned an unexpected error -- %v", err) - } - if b.String() != jsonT { - t.Fatalf("autorest: ByCopying failed to copy the bytes read") - } -} - -func TestByCopying_ReturnsNestedErrors(t *testing.T) { - r := mocks.NewResponseWithContent(jsonT) - - r.Body.Close() - err := Respond(r, - ByCopying(&bytes.Buffer{}), - ByUnmarshallingJSON(&mocks.T{}), - ByClosing()) - if err == nil { - t.Fatalf("autorest: ByCopying failed to return the expected error") - } -} - -func 
TestByCopying_AcceptsNilReponse(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByCopying(&bytes.Buffer{})) -} - -func TestByCopying_AcceptsNilBody(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByCopying(&bytes.Buffer{})) -} - -func TestByClosing(t *testing.T) { - r := mocks.NewResponse() - err := Respond(r, ByClosing()) - if err != nil { - t.Fatalf("autorest: ByClosing failed (%v)", err) - } - if r.Body.(*mocks.Body).IsOpen() { - t.Fatalf("autorest: ByClosing did not close the response body") - } -} - -func TestByClosingAcceptsNilResponse(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByClosing()) -} - -func TestByClosingAcceptsNilBody(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByClosing()) -} - -func TestByClosingClosesEvenAfterErrors(t *testing.T) { - var e error - - r := mocks.NewResponse() - Respond(r, - withErrorRespondDecorator(&e), - ByClosing()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Fatalf("autorest: ByClosing did not close the response body after an error occurred") - } -} - -func TestByClosingClosesReturnsNestedErrors(t *testing.T) { - var e error - - r := mocks.NewResponse() - err := Respond(r, - withErrorRespondDecorator(&e), - ByClosing()) - - if err == nil || !reflect.DeepEqual(e, err) { - t.Fatalf("autorest: ByClosing failed to return a nested error") - } -} - -func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByClosingIfError()) -} - -func TestByClosingIfErrorAcceptsNilBody(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByClosingIfError()) -} - -func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) { - var e error - - r := mocks.NewResponse() - Respond(r, - withErrorRespondDecorator(&e), - ByClosingIfError()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Fatalf("autorest: ByClosingIfError did not close the response body after an error occurred") - } -} - -func TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { - r := mocks.NewResponse() - Respond(r, - ByClosingIfError()) - - if !r.Body.(*mocks.Body).IsOpen() { - t.Fatalf("autorest: ByClosingIfError closed the response body even though no error occurred") - } -} - -func TestByDiscardingBody(t 
*testing.T) { - r := mocks.NewResponse() - err := Respond(r, - ByDiscardingBody()) - if err != nil { - t.Fatalf("autorest: ByDiscardingBody failed (%v)", err) - } - buf, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("autorest: Reading result of ByDiscardingBody failed (%v)", err) - } - - if len(buf) != 0 { - t.Logf("autorest: Body was not empty after calling ByDiscardingBody.") - t.Fail() - } -} - -func TestByDiscardingBodyAcceptsNilResponse(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByDiscardingBody()) -} - -func TestByDiscardingBodyAcceptsNilBody(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByDiscardingBody()) -} - -func TestByUnmarshallingJSON(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err != nil { - t.Fatalf("autorest: ByUnmarshallingJSON failed (%v)", err) - } - if v.Name != "Rob Pike" || v.Age != 42 { - t.Fatalf("autorest: ByUnmarshallingJSON failed to properly unmarshal") - } -} - -func TestByUnmarshallingJSON_HandlesReadErrors(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - r.Body.(*mocks.Body).Close() - - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err == nil { - t.Fatalf("autorest: ByUnmarshallingJSON failed to receive / respond to read error") - } -} - -func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) { - v := &mocks.T{} - j := jsonT[0 : len(jsonT)-2] - r := mocks.NewResponseWithContent(j) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err == nil || !strings.Contains(err.Error(), j) { - t.Fatalf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) - } -} - -func TestByUnmarshallingJSONEmptyInput(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(``) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err != nil { - t.Fatalf("autorest: ByUnmarshallingJSON failed to return nil in case of empty JSON (%v)", err) - } -} - -func TestByUnmarshallingXML(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(xmlT) - err := Respond(r, - ByUnmarshallingXML(v), - ByClosing()) - if err != nil { - t.Fatalf("autorest: ByUnmarshallingXML failed (%v)", err) - } - if v.Name != "Rob Pike" || v.Age != 42 { - t.Fatalf("autorest: ByUnmarshallingXML failed to properly unmarshal") - } -} - -func TestByUnmarshallingXML_HandlesReadErrors(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(xmlT) - r.Body.(*mocks.Body).Close() - - err := Respond(r, - ByUnmarshallingXML(v), - ByClosing()) - if err == nil { - t.Fatalf("autorest: ByUnmarshallingXML failed to receive / respond to read error") - } -} - -func TestByUnmarshallingXMLIncludesXMLInErrors(t *testing.T) { - v := &mocks.T{} - x := xmlT[0 : len(xmlT)-2] - r := mocks.NewResponseWithContent(x) - err := Respond(r, - ByUnmarshallingXML(v), - ByClosing()) - if err == nil || !strings.Contains(err.Error(), x) { - t.Fatalf("autorest: ByUnmarshallingXML failed 
to return XML in error (%v)", err) - } -} - -func TestRespondAcceptsNullResponse(t *testing.T) { - err := Respond(nil) - if err != nil { - t.Fatalf("autorest: Respond returned an unexpected error when given a null Response (%v)", err) - } -} - -func TestWithErrorUnlessStatusCodeOKResponse(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - ByUnmarshallingJSON(v), - ByClosing()) - - if err != nil { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) failed on okay response. (%v)", err) - } - - if v.Name != "Rob Pike" || v.Age != 42 { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) corrupted the response body of okay response.") - } -} - -func TesWithErrorUnlessStatusCodeErrorResponse(t *testing.T) { - v := &mocks.T{} - e := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusOK), - ByUnmarshallingJSON(v), - ByClosing()) - - if err == nil { - t.Fatal("autorest: WithErrorUnlessStatusCode(http.StatusOK) did not return error, on a response to a bad request.") - } - - var errorRespBody []byte - if derr, ok := err.(DetailedError); !ok { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) got wrong error type : %T, expected: DetailedError, on a response to a bad request.", err) - } else { - errorRespBody = derr.ServiceError - } - - if errorRespBody == nil { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) ServiceError not returned in DetailedError on a response to a bad request.") - } - - err = json.Unmarshal(errorRespBody, e) - if err != nil { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) cannot parse error returned in ServiceError into json. 
%v", err) - } - - expected := &mocks.T{Name: "Rob Pike", Age: 42} - if e != expected { - t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK wrong value from parsed ServiceError: got=%#v expected=%#v", e, expected) - } -} - -func TestWithErrorUnlessStatusCode(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError), - ByClosingIfError()) - - if err != nil { - t.Fatalf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status) - } -} - -func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError), - ByClosingIfError()) - - if err == nil { - t.Fatalf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status) - } -} - -func TestWithErrorUnlessOK(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - - err := Respond(r, - WithErrorUnlessOK(), - ByClosingIfError()) - - if err != nil { - t.Fatalf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err) - } -} - -func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessOK(), - ByClosingIfError()) - - if err == nil { - t.Fatalf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err) - } -} - -func TestExtractHeader(t *testing.T) { - r := mocks.NewResponse() - v := []string{"v1", "v2", "v3"} - mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) - - if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { - t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderHandlesMissingHeader(t *testing.T) { - var v []string - r := mocks.NewResponse() - - if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { - t.Fatalf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v", - v, ExtractHeader(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValue(t *testing.T) { - r := mocks.NewResponse() - v := "v1" - mocks.SetResponseHeader(r, mocks.TestHeader, v) - - if ExtractHeaderValue(mocks.TestHeader, r) != v { - t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) { - r := mocks.NewResponse() - v := "" - - if ExtractHeaderValue(mocks.TestHeader, r) != v { - t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) { - r := mocks.NewResponse() - v := []string{"v1", "v2", "v3"} - 
mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) - - if ExtractHeaderValue(mocks.TestHeader, r) != v[0] { - t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index e1ec49573f..6665d7c006 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -21,6 +21,8 @@ import ( "net/http" "strconv" "time" + + "github.com/Azure/go-autorest/tracing" ) // Sender is the interface that wraps the Do method to send HTTP requests. @@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. type SendDecorator func(Sender) Sender @@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { // // Send will not poll or retry requests. func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) + return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...) } // SendWithSender sends the passed http.Request, through the provided Sender, returning the @@ -86,7 +88,7 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht func AfterDelay(d time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Cancel) { + if !DelayForBackoff(d, 0, r.Context().Done()) { return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") } return s.Do(r) @@ -165,7 +167,7 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ... resp, err = s.Do(r) if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequest(resp, r.Cancel) + r, err = NewPollingRequestWithContext(r.Context(), resp) for err == nil && ResponseHasStatusCode(resp, codes...) { Respond(resp, @@ -198,7 +200,9 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { if err == nil { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } } return resp, err }) @@ -214,20 +218,29 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { rr := NewRetriableRequest(r) // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; attempt++ { + for attempt := 0; attempt < attempts+1; { err = rr.Prepare() if err != nil { return resp, err } resp, err = s.Do(rr.Request()) - // we want to retry if err is not nil (e.g. transient network failure) - if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return nil, err + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { return resp, err } - delayed := DelayWithRetryAfter(resp, r.Cancel) - if !delayed { - DelayForBackoff(backoff, attempt, r.Cancel) + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // don't count a 429 against the number of attempts + // so that we continue to retry until it succeeds + if resp == nil || resp.StatusCode != http.StatusTooManyRequests { + attempt++ } } return resp, err @@ -271,7 +284,9 @@ func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { if err == nil { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } } return resp, err }) diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go deleted file mode 100644 index a72731d41f..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go +++ /dev/null @@ -1,811 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
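
Reviewer note, not part of the vendored diff: the sender.go hunks above move cancelation from the deprecated http.Request.Cancel channel to the request context, stop retrying on non-temporary errors and token-refresh failures, and no longer count HTTP 429 responses against the attempt budget. A minimal sketch of driving the reworked decorator, assuming only the SendWithSender and DoRetryForStatusCodes signatures shown above; the URL, timeout, and status codes are arbitrary:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)

	// Cancelation now flows through the request context: canceling ctx
	// aborts the backoff sleeps inside DoRetryForStatusCodes.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// *http.Client satisfies the Sender interface (it has a matching Do method).
	resp, err := autorest.SendWithSender(http.DefaultClient, req.WithContext(ctx),
		// Up to 3 retries with 2s backoff on the listed codes; per the change
		// above, a 429 response does not consume an attempt.
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusRequestTimeout, http.StatusTooManyRequests))
	if err != nil {
		fmt.Println("ERROR:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
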
- -import ( - "bytes" - "fmt" - "log" - "net/http" - "os" - "reflect" - "sync" - "testing" - "time" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -func ExampleSendWithSender() { - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - - client := mocks.NewSender() - client.AppendAndRepeatResponse(r, 10) - - logger := log.New(os.Stdout, "autorest: ", 0) - na := NullAuthorizer{} - - req, _ := Prepare(&http.Request{}, - AsGet(), - WithBaseURL("https://microsoft.com/a/b/c/"), - na.WithAuthorization()) - - r, _ = SendWithSender(client, req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusAccepted), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - // Output: - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted -} - -func ExampleDoRetryForAttempts() { - client := mocks.NewSender() - client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) - - // Retry with backoff -- ensure returned Bodies are closed - r, _ := SendWithSender(client, mocks.NewRequest(), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - fmt.Printf("Retry stopped after %d attempts", client.Attempts()) - // Output: Retry stopped after 5 attempts -} - -func ExampleDoErrorIfStatusCode() { - client := mocks.NewSender() - client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 NoContent", http.StatusNoContent), 10) - - // Chain decorators to retry the request, up to five times, if the status code is 204 - r, _ := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusNoContent), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) - // Output: Retry stopped after 5 attempts with code 204 NoContent -} - -func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { - client := mocks.NewSender() - s := "" - - r, err := SendWithSender(client, mocks.NewRequest(), - withMessage(&s, "a"), - withMessage(&s, "b"), - withMessage(&s, "c")) - if err != nil { - t.Fatalf("autorest: SendWithSender returned an error (%v)", err) - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if s != "abc" { - t.Fatalf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) - } -} - -func TestCreateSender(t *testing.T) { - f := false - - s := CreateSender( - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return nil, nil - }) - } - })()) - s.Do(&http.Request{}) - - if !f { - t.Fatal("autorest: CreateSender failed to apply supplied decorator") - } -} - -func TestSend(t *testing.T) { - f := false - - Send(&http.Request{}, - (func() SendDecorator { - return func(s Sender) Sender { - 
return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return nil, nil - }) - } - })()) - - if !f { - t.Fatal("autorest: Send failed to apply supplied decorator") - } -} - -func TestAfterDelayWaits(t *testing.T) { - client := mocks.NewSender() - - d := 2 * time.Second - - tt := time.Now() - r, _ := SendWithSender(client, mocks.NewRequest(), - AfterDelay(d)) - s := time.Since(tt) - if s < d { - t.Fatal("autorest: AfterDelay failed to wait for at least the specified duration") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestAfterDelay_Cancels(t *testing.T) { - client := mocks.NewSender() - cancel := make(chan struct{}) - delay := 5 * time.Second - - var wg sync.WaitGroup - wg.Add(1) - tt := time.Now() - go func() { - req := mocks.NewRequest() - req.Cancel = cancel - wg.Done() - SendWithSender(client, req, - AfterDelay(delay)) - }() - wg.Wait() - close(cancel) - time.Sleep(5 * time.Millisecond) - if time.Since(tt) >= delay { - t.Fatal("autorest: AfterDelay failed to cancel") - } -} - -func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { - client := mocks.NewSender() - - d := 5 * time.Millisecond - start := time.Now() - r, _ := SendWithSender(client, mocks.NewRequest(), - AfterDelay(d)) - - if time.Since(start) > (5 * d) { - t.Fatal("autorest: AfterDelay waited too long (exceeded 5 times specified duration)") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestAsIs(t *testing.T) { - client := mocks.NewSender() - - r1 := mocks.NewResponse() - client.AppendResponse(r1) - - r2, err := SendWithSender(client, mocks.NewRequest(), - AsIs()) - if err != nil { - t.Fatalf("autorest: AsIs returned an unexpected error (%v)", err) - } else if !reflect.DeepEqual(r1, r2) { - t.Fatalf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) - } - - Respond(r1, - ByDiscardingBody(), - ByClosing()) - Respond(r2, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoCloseIfError(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Fatal("autorest: Expected DoCloseIfError to close response body -- it was left open") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { - client := mocks.NewSender() - - SendWithSender(client, mocks.NewRequest(), - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - resp.Body.Close() - } - return nil, fmt.Errorf("Faux Error") - }) - } - })(), - DoCloseIfError()) -} - -func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { - client := mocks.NewSender() - - SendWithSender(client, mocks.NewRequest(), - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - resp.Body.Close() - } - resp.Body = nil - return resp, fmt.Errorf("Faux Error") - }) - } - })(), - DoCloseIfError()) -} - -func TestDoErrorIfStatusCode(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - 
if err == nil { - t.Fatal("autorest: DoErrorIfStatusCode failed to emit an error for passed code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAcceptedResponse()) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - if err != nil { - t.Fatal("autorest: DoErrorIfStatusCode failed to ignore a status code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoErrorUnlessStatusCode(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorUnlessStatusCode(http.StatusAccepted), - DoCloseIfError()) - if err == nil { - t.Fatal("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAcceptedResponse()) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorUnlessStatusCode(http.StatusAccepted), - DoCloseIfError()) - if err != nil { - t.Fatal("autorest: DoErrorUnlessStatusCode emitted an error for a knonwn status code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { - client := mocks.NewSender() - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(5, time.Duration(0))) - if client.Attempts() != 1 { - t.Fatalf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v", - 1, client.Attempts()) - } - if err != nil { - t.Fatalf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err) - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { - client := mocks.NewSender() - client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(5, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Fatal("autorest: Mock client failed to emit errors") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if client.Attempts() != 5 { - t.Fatal("autorest: DoRetryForAttempts failed to stop after specified number of attempts") - } -} - -func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { - client := mocks.NewSender() - client.SetError(fmt.Errorf("Faux Error")) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(1, time.Duration(0))) - if err == nil { - t.Fatal("autorest: Mock client failed to emit errors") - } - - if r == nil { - t.Fatal("autorest: DoRetryForAttempts failed to return the underlying response") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { - client := mocks.NewSender() - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(10*time.Millisecond, time.Duration(0))) - if client.Attempts() != 1 { - t.Fatalf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v", - 1, client.Attempts()) - } - if err != nil { - t.Fatalf("autorest: DoRetryForDuration returned an unexpected error (%v)", err) - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - 
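
Reviewer note, not part of the vendored diff: the removed tests around this point repeatedly build small inline SendDecorators, and that pattern is useful to keep in mind when reading the vendored sender code. Below is a self-contained sketch of the same idea; withAttemptLogging is a hypothetical helper, not something the library provides.

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withAttemptLogging mirrors the inline decorators the deleted tests build:
// it wraps a Sender, records each attempt, then delegates to the wrapped Do.
func withAttemptLogging(count *int) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			*count++
			log.Printf("attempt %d: %s %s", *count, r.Method, r.URL)
			return s.Do(r)
		})
	}
}

func main() {
	attempts := 0
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	resp, err := autorest.SendWithSender(http.DefaultClient, req,
		withAttemptLogging(&attempts))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Printf("done after %d attempt(s): %s", attempts, resp.Status)
}
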
-func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { - client := mocks.NewSender() - client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) - - d := 5 * time.Millisecond - start := time.Now() - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(d, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Fatal("autorest: Mock client failed to emit errors") - } - - if time.Since(start) < d { - t.Fatal("autorest: DoRetryForDuration failed stopped too soon") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForDurationStopsWithinReason(t *testing.T) { - client := mocks.NewSender() - client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) - - d := 5 * time.Second - start := time.Now() - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(d, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Fatal("autorest: Mock client failed to emit errors") - } - - if time.Since(start) > (5 * d) { - t.Fatal("autorest: DoRetryForDuration failed stopped soon enough (exceeded 5 times specified duration)") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForDurationReturnsResponse(t *testing.T) { - client := mocks.NewSender() - client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(10*time.Millisecond, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Fatal("autorest: Mock client failed to emit errors") - } - - if r == nil { - t.Fatal("autorest: DoRetryForDuration failed to return the underlying response") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDelayForBackoff(t *testing.T) { - d := 2 * time.Second - start := time.Now() - DelayForBackoff(d, 0, nil) - if time.Since(start) < d { - t.Fatal("autorest: DelayForBackoff did not delay as long as expected") - } -} - -func TestDelayForBackoff_Cancels(t *testing.T) { - cancel := make(chan struct{}) - delay := 5 * time.Second - - var wg sync.WaitGroup - wg.Add(1) - start := time.Now() - go func() { - wg.Done() - DelayForBackoff(delay, 0, cancel) - }() - wg.Wait() - close(cancel) - time.Sleep(5 * time.Millisecond) - if time.Since(start) >= delay { - t.Fatal("autorest: DelayForBackoff failed to cancel") - } -} - -func TestDelayForBackoffWithinReason(t *testing.T) { - d := 5 * time.Second - maxCoefficient := 2 - start := time.Now() - DelayForBackoff(d, 0, nil) - if time.Since(start) > (time.Duration(maxCoefficient) * d) { - - t.Fatalf("autorest: DelayForBackoff delayed too long (exceeded %d times the specified duration)", maxCoefficient) - } -} - -func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) { - client := mocks.NewSender() - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Duration(0), time.Duration(0))) - - if client.Attempts() != 1 { - t.Fatalf("autorest: Sender#DoPollForStatusCodes polled for unspecified status code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAcceptedResponse()) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - - if client.Attempts() != 2 { - t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to poll for specified status code") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func 
TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { - cancel := make(chan struct{}) - delay := 5 * time.Second - - r := mocks.NewResponse() - mocks.SetAcceptedHeaders(r) - client := mocks.NewSender() - client.AppendAndRepeatResponse(r, 100) - - var wg sync.WaitGroup - wg.Add(1) - start := time.Now() - go func() { - wg.Done() - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - Respond(r, - ByDiscardingBody(), - ByClosing()) - }() - wg.Wait() - close(cancel) - time.Sleep(5 * time.Millisecond) - if time.Since(start) >= delay { - t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to cancel") - } -} - -func TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { - resp := newAcceptedResponse() - - client := mocks.NewSender() - client.AppendAndRepeatResponse(resp, 2) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - - if resp.Body.(*mocks.Body).IsOpen() || resp.Body.(*mocks.Body).CloseAttempts() < 2 { - t.Fatalf("autorest: Sender#DoPollForStatusCodes did not close unreturned response bodies") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { - client := mocks.NewSender() - client.AppendResponse(newAcceptedResponse()) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - - if !r.Body.(*mocks.Body).IsOpen() { - t.Fatalf("autorest: Sender#DoPollForStatusCodes did not leave open the body of the last response") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(newAcceptedResponse(), 5) - client.SetError(fmt.Errorf("Faux Error")) - client.SetEmitErrorAfter(1) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - - if client.Attempts() > 2 { - t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to stop polling after receiving an error") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(newAcceptedResponse(), 5) - client.SetError(fmt.Errorf("Faux Error")) - client.SetEmitErrorAfter(1) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) - - if err == nil { - t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to return error from polling") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestWithLogging_Logs(t *testing.T) { - buf := &bytes.Buffer{} - logger := log.New(buf, "autorest: ", 0) - client := mocks.NewSender() - - r, _ := SendWithSender(client, &http.Request{}, - WithLogging(logger)) - - if buf.String() == "" { - t.Fatal("autorest: Sender#WithLogging failed to log the request") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestWithLogging_HandlesMissingResponse(t *testing.T) { - buf := &bytes.Buffer{} - logger := log.New(buf, "autorest: ", 0) - client := mocks.NewSender() - client.AppendResponse(nil) - client.SetError(fmt.Errorf("Faux Error")) - - r, err := SendWithSender(client, &http.Request{}, - WithLogging(logger)) - - if r != 
nil || err == nil { - t.Fatal("autorest: Sender#WithLogging returned a valid response -- expecting nil") - } - if buf.String() == "" { - t.Fatal("autorest: Sender#WithLogging failed to log the request for a nil response") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) -} - -func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("408 Request Timeout", http.StatusRequestTimeout), 2) - client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoRetryForStatusCodes(5, time.Duration(2*time.Second), http.StatusRequestTimeout), - ) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if client.Attempts() != 3 { - t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", - r.Status, client.Attempts()-1) - } -} - -func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("504 Gateway Timeout", http.StatusGatewayTimeout), 5) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoRetryForStatusCodes(2, time.Duration(2*time.Second), http.StatusGatewayTimeout), - ) - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if client.Attempts() != 3 { - t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: failed stop after %v retry attempts; Want: Stop after 2 retry attempts", - client.Attempts()-1) - } -} - -func TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 1) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), - ) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if client.Attempts() != 1 || r.Status != "204 No Content" { - t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Retry attempts %v for StatusCode %v; Want: 0 attempts for StatusCode 204", - client.Attempts(), r.Status) - } -} - -func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { - client := mocks.NewSender() - client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 2) - - r, err := SendWithSender(client, mocks.NewRequestWithCloseBody(), - DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), - ) - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if err == nil || client.Attempts() != 0 { - t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Not failed for request body read error; Want: Failed for body read error - %v", err) - } -} - -func newAcceptedResponse() *http.Response { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - return resp -} - -func TestDelayWithRetryAfterWithSuccess(t *testing.T) { - after, retries := 5, 2 - totalSecs := after * retries - - client := mocks.NewSender() - resp := mocks.NewResponseWithStatus("429 Too many requests", http.StatusTooManyRequests) - mocks.SetResponseHeader(resp, "Retry-After", fmt.Sprintf("%v", after)) - client.AppendAndRepeatResponse(resp, retries) - client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) - - d := time.Second * time.Duration(totalSecs) - start := time.Now() - r, _ := SendWithSender(client, mocks.NewRequest(), - 
DoRetryForStatusCodes(5, time.Duration(time.Second), http.StatusTooManyRequests), - ) - - if time.Since(start) < d { - t.Fatal("autorest: DelayWithRetryAfter failed stopped too soon") - } - - Respond(r, - ByDiscardingBody(), - ByClosing()) - - if client.Attempts() != 3 { - t.Fatalf("autorest: Sender#DelayWithRetryAfter -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", - r.Status, client.Attempts()-1) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index 1ef4575fa0..08cf11c118 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -20,11 +20,13 @@ import ( "encoding/xml" "fmt" "io" + "net" "net/http" "net/url" "reflect" - "sort" "strings" + + "github.com/Azure/go-autorest/autorest/adal" ) // EncodedAs is a series of constants specifying various data encodings @@ -138,13 +140,38 @@ func MapToValues(m map[string]interface{}) url.Values { return v } +// AsStringSlice method converts interface{} to []string. It expects the +// parameter to be a slice or an array whose element type has string as its +// underlying type. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, v.Index(i).String()) + } + return stringSlice, nil +} + // String method converts interface v to string. If interface is a list, it -// joins list elements using separator. +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. func String(v interface{}, sep ...string) string { - if len(sep) > 0 { - return ensureValueString(strings.Join(v.([]string), sep[0])) + if len(sep) == 0 { + return ensureValueString(v) } - return ensureValueString(v) + stringSlice, ok := v.([]string) + if !ok { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: couldn't convert value to a string slice: %s", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) } // Encode method encodes url path and query parameters. @@ -168,30 +195,6 @@ func queryEscape(s string) string { return url.QueryEscape(s) } -// This method is same as Encode() method of "net/url" go package, -// except it does not encode the query parameters because they -// already come encoded. It formats values map in query format (bar=foo&a=b). -func createQuery(v url.Values) string { - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(v) - } - } - return buf.String() -} - // ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). // This is mainly useful for long-running operations that use the Azure-AsyncOperation // header, so we change the initial PUT into a GET to retrieve the final result.
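As an aside on the String/AsStringSlice rework above: String now falls back to AsStringSlice, so a slice or array of any string-based type can be joined, not only []string. A minimal usage sketch, not part of the patch (the Color type is hypothetical; the canonical import path github.com/Azure/go-autorest/autorest is assumed):

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest"
    )

    // Color is a hypothetical enum whose underlying type is string.
    type Color string

    func main() {
        // A plain []string is joined with the first separator, as before.
        fmt.Println(autorest.String([]string{"a", "b", "c"}, ",")) // a,b,c

        // A slice of a string-based type is converted via AsStringSlice first.
        fmt.Println(autorest.String([]Color{"red", "green"}, ";")) // red;green

        // With no separator the value is simply formatted as a string.
        fmt.Println(autorest.String(42)) // 42
    }

Note that String panics when a separator is supplied and the value cannot be converted to a string slice, so the separator form should only be used with slice or array values.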
@@ -202,3 +205,24 @@ func ChangeToGet(req *http.Request) *http.Request { req.Header.Del("Content-Length") return req } + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error. +// If the error does not implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || netErr.Temporary() { + return true + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_test.go b/vendor/github.com/Azure/go-autorest/autorest/utility_test.go deleted file mode 100644 index 1cda8758f8..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility_test.go +++ /dev/null @@ -1,382 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
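The two helpers added to utility.go above give callers a uniform way to classify failures; IsTokenRefreshError also unwraps DetailedError chains via Original. A hedged sketch of how a retry policy might use them (shouldRetry and the timeout value are illustrative, not part of this patch; import path assumed as above):

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/Azure/go-autorest/autorest"
    )

    // shouldRetry is a hypothetical policy built on the new helpers.
    func shouldRetry(err error) bool {
        if err == nil {
            return false
        }
        // A failed token refresh is terminal; retrying cannot succeed
        // until the credentials are fixed.
        if autorest.IsTokenRefreshError(err) {
            return false
        }
        // Temporary network errors are worth another attempt; errors that
        // do not implement net.Error are also treated as temporary.
        return autorest.IsTemporaryNetworkError(err)
    }

    func main() {
        client := &http.Client{Timeout: 2 * time.Second}
        _, err := client.Get("http://192.0.2.1/") // TEST-NET-1 address, expected to time out
        fmt.Println("retry?", shouldRetry(err))
    }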
- -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "reflect" - "sort" - "strings" - "testing" - - "github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - jsonT = ` - { - "name":"Rob Pike", - "age":42 - }` - xmlT = ` - - Rob Pike - 42 - ` -) - -func TestNewDecoderCreatesJSONDecoder(t *testing.T) { - d := NewDecoder(EncodedAsJSON, strings.NewReader(jsonT)) - _, ok := d.(*json.Decoder) - if d == nil || !ok { - t.Fatal("autorest: NewDecoder failed to create a JSON decoder when requested") - } -} - -func TestNewDecoderCreatesXMLDecoder(t *testing.T) { - d := NewDecoder(EncodedAsXML, strings.NewReader(xmlT)) - _, ok := d.(*xml.Decoder) - if d == nil || !ok { - t.Fatal("autorest: NewDecoder failed to create an XML decoder when requested") - } -} - -func TestNewDecoderReturnsNilForUnknownEncoding(t *testing.T) { - d := NewDecoder("unknown", strings.NewReader(xmlT)) - if d != nil { - t.Fatal("autorest: NewDecoder created a decoder for an unknown encoding") - } -} - -func TestCopyAndDecodeDecodesJSON(t *testing.T) { - _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) - if err != nil { - t.Fatalf("autorest: CopyAndDecode returned an error with valid JSON - %v", err) - } -} - -func TestCopyAndDecodeDecodesXML(t *testing.T) { - _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT), &mocks.T{}) - if err != nil { - t.Fatalf("autorest: CopyAndDecode returned an error with valid XML - %v", err) - } -} - -func TestCopyAndDecodeReturnsJSONDecodingErrors(t *testing.T) { - _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT[0:len(jsonT)-2]), &mocks.T{}) - if err == nil { - t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid JSON") - } -} - -func TestCopyAndDecodeReturnsXMLDecodingErrors(t *testing.T) { - _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT[0:len(xmlT)-2]), &mocks.T{}) - if err == nil { - t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid XML") - } -} - -func TestCopyAndDecodeAlwaysReturnsACopy(t *testing.T) { - b, _ := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) - if b.String() != jsonT { - t.Fatalf("autorest: CopyAndDecode failed to return a valid copy of the data - %v", b.String()) - } -} - -func TestTeeReadCloser_Copies(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - b := &bytes.Buffer{} - - r.Body = TeeReadCloser(r.Body, b) - - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err != nil { - t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) - } - if b.String() != jsonT { - t.Fatalf("autorest: TeeReadCloser failed to copy the bytes read") - } -} - -func TestTeeReadCloser_PassesReadErrors(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - - r.Body.(*mocks.Body).Close() - r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) - - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err == nil { - t.Fatalf("autorest: TeeReadCloser failed to return the expected error") - } -} - -func TestTeeReadCloser_ClosesWrappedReader(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - - b := r.Body.(*mocks.Body) - r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err != nil { - t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) - } - if b.IsOpen() { - t.Fatalf("autorest: TeeReadCloser failed to close the nested 
io.ReadCloser") - } -} - -func TestContainsIntFindsValue(t *testing.T) { - ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - v := 5 - if !containsInt(ints, v) { - t.Fatalf("autorest: containsInt failed to find %v in %v", v, ints) - } -} - -func TestContainsIntDoesNotFindValue(t *testing.T) { - ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - v := 42 - if containsInt(ints, v) { - t.Fatalf("autorest: containsInt unexpectedly found %v in %v", v, ints) - } -} - -func TestContainsIntAcceptsEmptyList(t *testing.T) { - ints := make([]int, 10) - if containsInt(ints, 42) { - t.Fatalf("autorest: containsInt failed to handle an empty list") - } -} - -func TestContainsIntAcceptsNilList(t *testing.T) { - var ints []int - if containsInt(ints, 42) { - t.Fatalf("autorest: containsInt failed to handle an nil list") - } -} - -func TestEscapeStrings(t *testing.T) { - m := map[string]string{ - "string": "a long string with = odd characters", - "int": "42", - "nil": "", - } - r := map[string]string{ - "string": "a+long+string+with+%3D+odd+characters", - "int": "42", - "nil": "", - } - v := escapeValueStrings(m) - if !reflect.DeepEqual(v, r) { - t.Fatalf("autorest: ensureValueStrings returned %v\n", v) - } -} - -func TestEnsureStrings(t *testing.T) { - m := map[string]interface{}{ - "string": "string", - "int": 42, - "nil": nil, - "bytes": []byte{255, 254, 253}, - } - r := map[string]string{ - "string": "string", - "int": "42", - "nil": "", - "bytes": string([]byte{255, 254, 253}), - } - v := ensureValueStrings(m) - if !reflect.DeepEqual(v, r) { - t.Fatalf("autorest: ensureValueStrings returned %v\n", v) - } -} - -func ExampleString() { - m := []string{ - "string1", - "string2", - "string3", - } - - fmt.Println(String(m, ",")) - // Output: string1,string2,string3 -} - -func TestStringWithValidString(t *testing.T) { - i := 123 - if String(i) != "123" { - t.Fatal("autorest: String method failed to convert integer 123 to string") - } -} - -func TestEncodeWithValidPath(t *testing.T) { - s := Encode("Path", "Hello Gopher") - if s != "Hello%20Gopher" { - t.Fatalf("autorest: Encode method failed for valid path encoding. Got: %v; Want: %v", s, "Hello%20Gopher") - } -} - -func TestEncodeWithValidQuery(t *testing.T) { - s := Encode("Query", "Hello Gopher") - if s != "Hello+Gopher" { - t.Fatalf("autorest: Encode method failed for valid query encoding. Got: '%v'; Want: 'Hello+Gopher'", s) - } -} - -func TestEncodeWithValidNotPathQuery(t *testing.T) { - s := Encode("Host", "Hello Gopher") - if s != "Hello Gopher" { - t.Fatalf("autorest: Encode method failed for parameter not query or path. 
Got: '%v'; Want: 'Hello Gopher'", s) - } -} - -func TestMapToValues(t *testing.T) { - m := map[string]interface{}{ - "a": "a", - "b": 2, - } - v := url.Values{} - v.Add("a", "a") - v.Add("b", "2") - if !isEqual(v, MapToValues(m)) { - t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) - } -} - -func TestMapToValuesWithArrayValues(t *testing.T) { - m := map[string]interface{}{ - "a": []string{"a", "b"}, - "b": 2, - "c": []int{3, 4}, - } - v := url.Values{} - v.Add("a", "a") - v.Add("a", "b") - v.Add("b", "2") - v.Add("c", "3") - v.Add("c", "4") - - if !isEqual(v, MapToValues(m)) { - t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) - } -} - -func isEqual(v, u url.Values) bool { - for key, value := range v { - if len(u[key]) == 0 { - return false - } - sort.Strings(value) - sort.Strings(u[key]) - for i := range value { - if value[i] != u[key][i] { - return false - } - } - u.Del(key) - } - if len(u) > 0 { - return false - } - return true -} - -func doEnsureBodyClosed(t *testing.T) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { - t.Fatal("autorest: Expected Body to be closed -- it was left open") - } - return resp, err - }) - } -} - -type mockAuthorizer struct{} - -func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { - return WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) -} - -type mockFailingAuthorizer struct{} - -func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") - }) - } -} - -type mockInspector struct { - wasInvoked bool -} - -func (mi *mockInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - mi.wasInvoked = true - return p.Prepare(r) - }) - } -} - -func (mi *mockInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - mi.wasInvoked = true - return r.Respond(resp) - }) - } -} - -func withMessage(output *string, msg string) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil { - *output += msg - } - return resp, err - }) - } -} - -func withErrorRespondDecorator(e *error) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil { - return err - } - *e = fmt.Errorf("autorest: Faux Respond Error") - return *e - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index f588807dbb..773fb96125 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -15,35 +15,27 @@ package autorest // limitations under the License. 
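The version.go rewrite just below replaces the sync.Once string builder with a fixed version constant and adds a UserAgent helper. A minimal sketch of the resulting API (output assumes go1.12 on linux/amd64; import path assumed as above):

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        fmt.Println(autorest.Version())   // v11.7.1
        fmt.Println(autorest.UserAgent()) // Go/go1.12 (amd64-linux) go-autorest/v11.7.1
    }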
import ( - "bytes" "fmt" - "strings" - "sync" + "runtime" ) -const ( - major = 8 - minor = 0 - patch = 0 - tag = "" +const number = "v11.7.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) ) -var once sync.Once -var version string +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} // Version returns the semantic version (see http://semver.org). func Version() string { - once.Do(func() { - semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) - verBuilder := bytes.NewBufferString(semver) - if tag != "" && tag != "-" { - updated := strings.TrimPrefix(tag, "-") - _, err := verBuilder.WriteString("-" + updated) - if err == nil { - verBuilder = bytes.NewBufferString(semver) - } - } - version = verBuilder.String() - }) - return version + return number } diff --git a/vendor/github.com/Azure/go-autorest/glide.lock b/vendor/github.com/Azure/go-autorest/glide.lock deleted file mode 100644 index 695d58cff1..0000000000 --- a/vendor/github.com/Azure/go-autorest/glide.lock +++ /dev/null @@ -1,44 +0,0 @@ -hash: 6e0121d946623e7e609280b1b18627e1c8a767fdece54cb97c4447c1167cbc46 -updated: 2017-08-31T13:58:01.034822883+01:00 -imports: -- name: github.com/dgrijalva/jwt-go - version: 2268707a8f0843315e2004ee4f1d021dc08baedf - subpackages: - - . -- name: github.com/dimchansky/utfbom - version: 6c6132ff69f0f6c088739067407b5d32c52e1d0f -- name: github.com/mitchellh/go-homedir - version: b8bc1bf767474819792c23f32d8286a45736f1c6 -- name: golang.org/x/crypto - version: 81e90905daefcd6fd217b62423c0908922eadb30 - repo: https://github.com/golang/crypto.git - vcs: git - subpackages: - - pkcs12 - - pkcs12/internal/rc2 -- name: golang.org/x/net - version: 66aacef3dd8a676686c7ae3716979581e8b03c47 - repo: https://github.com/golang/net.git - vcs: git - subpackages: - - . -- name: golang.org/x/text - version: 21e35d45962262c8ee80f6cb048dcf95ad0e9d79 - repo: https://github.com/golang/text.git - vcs: git - subpackages: - - . -testImports: -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 890a5c3458b43e6104ff5da8dfa139d013d77544 - subpackages: - - assert - - require diff --git a/vendor/github.com/Azure/go-autorest/glide.yaml b/vendor/github.com/Azure/go-autorest/glide.yaml deleted file mode 100644 index 375dcfd1a3..0000000000 --- a/vendor/github.com/Azure/go-autorest/glide.yaml +++ /dev/null @@ -1,22 +0,0 @@ -package: github.com/Azure/go-autorest -import: -- package: github.com/dgrijalva/jwt-go - subpackages: - - . -- package: golang.org/x/crypto - repo: https://github.com/golang/crypto.git - vcs: git - subpackages: - - pkcs12 -- package: golang.org/x/net - repo: https://github.com/golang/net.git - vcs: git - subpackages: - - . -- package: golang.org/x/text - repo: https://github.com/golang/text.git - vcs: git - subpackages: - - . 
-- package: github.com/mitchellh/go-homedir -- package: github.com/dimchansky/utfbom diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 0000000000..da09f394c5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. 
+ URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 0000000000..cd61cb18b6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,190 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "fmt" + "net/http" + "os" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +var ( + // Transport is the default tracing RoundTripper. The custom options setter will control + // if traces are being emitted or not. + Transport = &ochttp.Transport{ + Propagation: &tracecontext.HTTPFormat{}, + GetStartOptions: getStartOptions, + } + + // enabled is the flag for marking if tracing is enabled. + enabled = false + + // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise + // it will be using the parent sampler or the default. + sampler = trace.NeverSample() + + // Views for metric instrumentation. + views = map[string]*view.View{} + + // the trace exporter + traceExporter trace.Exporter +) + +func init() { + enableFromEnv() +} + +func enableFromEnv() { + _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") + _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") + if ok || legacyOk { + agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") + + if ok { + EnableWithAIForwarding(agentEndpoint) + } else { + Enable() + } + } +} + +// IsEnabled returns true if monitoring is enabled for the sdk. +func IsEnabled() bool { + return enabled +} + +// Enable will start instrumentation for metrics and traces. +func Enable() error { + enabled = true + sampler = nil + + err := initStats() + return err +} + +// Disable will disable instrumentation for metrics and traces. 
+func Disable() { + disableStats() + sampler = trace.NeverSample() + if traceExporter != nil { + trace.UnregisterExporter(traceExporter) + } + enabled = false +} + +// EnableWithAIForwarding will start instrumentation and connect to the App Insights forwarder +// exporter, making the metrics and traces available in App Insights. +func EnableWithAIForwarding(agentEndpoint string) (err error) { + err = Enable() + if err != nil { + return err + } + + traceExporter, err = ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) + if err != nil { + return err + } + trace.RegisterExporter(traceExporter) + return +} + +// getStartOptions is the custom options setter for the ochttp package. +func getStartOptions(*http.Request) trace.StartOptions { + return trace.StartOptions{ + Sampler: sampler, + } +} + +// initStats registers the views for the http metrics +func initStats() (err error) { + clientViews := []*view.View{ + ochttp.ClientCompletedCount, + ochttp.ClientRoundtripLatencyDistribution, + ochttp.ClientReceivedBytesDistribution, + ochttp.ClientSentBytesDistribution, + } + for _, cv := range clientViews { + vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) + views[vn] = cv.WithName(vn) + err = view.Register(views[vn]) + if err != nil { + return err + } + } + return +} + +// disableStats will unregister the previously registered metrics +func disableStats() { + for _, v := range views { + view.Unregister(v) + } +} + +// StartSpan starts a trace span +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) + return ctx +} + +// EndSpan ends a previously started span stored in the context +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + span := trace.FromContext(ctx) + + if span == nil { + return + } + + if err != nil { + span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) + } + span.End() +} + +// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined +// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status +func toTraceStatusCode(httpStatusCode int) int32 { + switch { + case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: + return trace.StatusCodeOK + case httpStatusCode == http.StatusBadRequest: + return trace.StatusCodeInvalidArgument + case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
+ return trace.StatusCodeUnauthenticated + case httpStatusCode == http.StatusForbidden: + return trace.StatusCodePermissionDenied + case httpStatusCode == http.StatusNotFound: + return trace.StatusCodeNotFound + case httpStatusCode == http.StatusTooManyRequests: + return trace.StatusCodeResourceExhausted + case httpStatusCode == 499: + return trace.StatusCodeCancelled + case httpStatusCode == http.StatusNotImplemented: + return trace.StatusCodeUnimplemented + case httpStatusCode == http.StatusServiceUnavailable: + return trace.StatusCodeUnavailable + case httpStatusCode == http.StatusGatewayTimeout: + return trace.StatusCodeDeadlineExceeded + default: + return trace.StatusCodeUnknown + } +} diff --git a/vendor/github.com/PuerkitoBio/purell/bench_test.go b/vendor/github.com/PuerkitoBio/purell/bench_test.go deleted file mode 100644 index 7549731fc4..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/bench_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package purell - -import ( - "testing" -) - -var ( - safeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/..//?" - usuallySafeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/" - unsafeUrl = "HttPS://..www.iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" - allDWORDUrl = "HttPS://1113982867:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" - allOctalUrl = "HttPS://0102.0146.07.0223:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" - allHexUrl = "HttPS://0x42660793:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" - allCombinedUrl = "HttPS://..0x42660793.:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" -) - -func BenchmarkSafe(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(safeUrl, FlagsSafe) - } -} - -func BenchmarkUsuallySafe(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(usuallySafeUrl, FlagsUsuallySafeGreedy) - } -} - -func BenchmarkUnsafe(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(unsafeUrl, FlagsUnsafeGreedy) - } -} - -func BenchmarkAllDWORD(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(allDWORDUrl, FlagsAllGreedy) - } -} - -func BenchmarkAllOctal(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(allOctalUrl, FlagsAllGreedy) - } -} - -func BenchmarkAllHex(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(allHexUrl, FlagsAllGreedy) - } -} - -func BenchmarkAllCombined(b *testing.B) { - for i := 0; i < b.N; i++ { - NormalizeURLString(allCombinedUrl, FlagsAllGreedy) - } -} diff --git a/vendor/github.com/PuerkitoBio/purell/example_test.go b/vendor/github.com/PuerkitoBio/purell/example_test.go deleted file mode 100644 index 997b95369c..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/example_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: 
http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} diff --git a/vendor/github.com/PuerkitoBio/purell/purell_test.go b/vendor/github.com/PuerkitoBio/purell/purell_test.go deleted file mode 100644 index a3732e5a3d..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell_test.go +++ /dev/null @@ -1,768 +0,0 @@ -package purell - -import ( - "fmt" - "net/url" - "testing" -) - -type testCase struct { - nm string - src string - flgs NormalizationFlags - res string - parsed bool -} - -var ( - cases = [...]*testCase{ - &testCase{ - "LowerScheme", - "HTTP://www.SRC.ca", - FlagLowercaseScheme, - "http://www.SRC.ca", - false, - }, - &testCase{ - "LowerScheme2", - "http://www.SRC.ca", - FlagLowercaseScheme, - "http://www.SRC.ca", - false, - }, - &testCase{ - "LowerHost", - "HTTP://www.SRC.ca/", - FlagLowercaseHost, - "http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "UpperEscapes", - `http://www.whatever.com/Some%aa%20Special%8Ecases/`, - FlagUppercaseEscapes, - "http://www.whatever.com/Some%AA%20Special%8Ecases/", - false, - }, - &testCase{ - "UnnecessaryEscapes", - `http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`, - FlagDecodeUnnecessaryEscapes, - "http://www.toto.com/AB.D/23R-/_~", - false, - }, - &testCase{ - "RemoveDefaultPort", - "HTTP://www.SRC.ca:80/", - FlagRemoveDefaultPort, - "http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveDefaultPort2", - "HTTP://www.SRC.ca:80", - FlagRemoveDefaultPort, - "http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveDefaultPort3", - "HTTP://www.SRC.ca:8080", - FlagRemoveDefaultPort, - "http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "Safe", - "HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e", - FlagsSafe, - "http://www.src.ca/to%1Ato%8B%EE/OKnowABC~", - false, - }, - &testCase{ - "BothLower", - "HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e", - FlagLowercaseHost | FlagLowercaseScheme, - "http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~", - false, - }, - &testCase{ - "RemoveTrailingSlash", - "HTTP://www.SRC.ca:80/", - FlagRemoveTrailingSlash, - "http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveTrailingSlash2", - "HTTP://www.SRC.ca:80/toto/titi/", - FlagRemoveTrailingSlash, - "http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveTrailingSlash3", - "HTTP://www.SRC.ca:80/toto/titi/fin/?a=1", - FlagRemoveTrailingSlash, - "http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "AddTrailingSlash", - "HTTP://www.SRC.ca:80", - FlagAddTrailingSlash, - "http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "AddTrailingSlash2", - "HTTP://www.SRC.ca:80/toto/titi.html", - FlagAddTrailingSlash, - "http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "AddTrailingSlash3", - 
"HTTP://www.SRC.ca:80/toto/titi/fin?a=1", - FlagAddTrailingSlash, - "http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveDotSegments", - "HTTP://root/a/b/./../../c/", - FlagRemoveDotSegments, - "http://root/c/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveDotSegments2", - "HTTP://root/../a/b/./../c/../d", - FlagRemoveDotSegments, - "http://root/a/d", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "UsuallySafe", - "HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test", - FlagsUsuallySafeGreedy, - "http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test", - false, - }, - &testCase{ - "RemoveDirectoryIndex", - "HTTP://root/a/b/c/default.aspx", - FlagRemoveDirectoryIndex, - "http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveDirectoryIndex2", - "HTTP://root/a/b/c/default#a=b", - FlagRemoveDirectoryIndex, - "http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "RemoveFragment", - "HTTP://root/a/b/c/default#toto=tata", - FlagRemoveFragment, - "http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased - false, - }, - &testCase{ - "ForceHTTP", - "https://root/a/b/c/default#toto=tata", - FlagForceHTTP, - "http://root/a/b/c/default#toto=tata", - false, - }, - &testCase{ - "RemoveDuplicateSlashes", - "https://root/a//b///c////default#toto=tata", - FlagRemoveDuplicateSlashes, - "https://root/a/b/c/default#toto=tata", - false, - }, - &testCase{ - "RemoveDuplicateSlashes2", - "https://root//a//b///c////default#toto=tata", - FlagRemoveDuplicateSlashes, - "https://root/a/b/c/default#toto=tata", - false, - }, - &testCase{ - "RemoveWWW", - "https://www.root/a/b/c/", - FlagRemoveWWW, - "https://root/a/b/c/", - false, - }, - &testCase{ - "RemoveWWW2", - "https://WwW.Root/a/b/c/", - FlagRemoveWWW, - "https://Root/a/b/c/", - false, - }, - &testCase{ - "AddWWW", - "https://Root/a/b/c/", - FlagAddWWW, - "https://www.Root/a/b/c/", - false, - }, - &testCase{ - "SortQuery", - "http://root/toto/?b=4&a=1&c=3&b=2&a=5", - FlagSortQuery, - "http://root/toto/?a=1&a=5&b=2&b=4&c=3", - false, - }, - &testCase{ - "RemoveEmptyQuerySeparator", - "http://root/toto/?", - FlagRemoveEmptyQuerySeparator, - "http://root/toto/", - false, - }, - &testCase{ - "Unsafe", - "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", - FlagsUnsafeGreedy, - "http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3", - false, - }, - &testCase{ - "Safe2", - "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", - FlagsSafe, - "https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", - false, - }, - &testCase{ - "UsuallySafe2", - "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", - FlagsUsuallySafeGreedy, - "https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid", - false, - }, - &testCase{ - "AddTrailingSlashBug", - "http://src.ca/", - FlagsAllNonGreedy, - "http://www.src.ca/", - false, - }, - &testCase{ - "SourceModified", - "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", - FlagsUnsafeGreedy, - "http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3", - true, - }, - &testCase{ - "IPv6-1", - "http://[2001:db8:1f70::999:de8:7648:6e8]/test", - FlagsSafe | FlagRemoveDotSegments, - "http://[2001:db8:1f70::999:de8:7648:6e8]/test", - false, - }, - &testCase{ - 
"IPv6-2", - "http://[::ffff:192.168.1.1]/test", - FlagsSafe | FlagRemoveDotSegments, - "http://[::ffff:192.168.1.1]/test", - false, - }, - &testCase{ - "IPv6-3", - "http://[::ffff:192.168.1.1]:80/test", - FlagsSafe | FlagRemoveDotSegments, - "http://[::ffff:192.168.1.1]/test", - false, - }, - &testCase{ - "IPv6-4", - "htTps://[::fFff:192.168.1.1]:443/test", - FlagsSafe | FlagRemoveDotSegments, - "https://[::ffff:192.168.1.1]/test", - false, - }, - &testCase{ - "FTP", - "ftp://user:pass@ftp.foo.net/foo/bar", - FlagsSafe | FlagRemoveDotSegments, - "ftp://user:pass@ftp.foo.net/foo/bar", - false, - }, - &testCase{ - "Standard-1", - "http://www.foo.com:80/foo", - FlagsSafe | FlagRemoveDotSegments, - "http://www.foo.com/foo", - false, - }, - &testCase{ - "Standard-2", - "http://www.foo.com:8000/foo", - FlagsSafe | FlagRemoveDotSegments, - "http://www.foo.com:8000/foo", - false, - }, - &testCase{ - "Standard-3", - "http://www.foo.com/%7ebar", - FlagsSafe | FlagRemoveDotSegments, - "http://www.foo.com/~bar", - false, - }, - &testCase{ - "Standard-4", - "http://www.foo.com/%7Ebar", - FlagsSafe | FlagRemoveDotSegments, - "http://www.foo.com/~bar", - false, - }, - &testCase{ - "Standard-5", - "http://USER:pass@www.Example.COM/foo/bar", - FlagsSafe | FlagRemoveDotSegments, - "http://USER:pass@www.example.com/foo/bar", - false, - }, - &testCase{ - "Standard-6", - "http://test.example/?a=%26&b=1", - FlagsSafe | FlagRemoveDotSegments, - "http://test.example/?a=%26&b=1", - false, - }, - &testCase{ - "Standard-7", - "http://test.example/%25/?p=%20val%20%25", - FlagsSafe | FlagRemoveDotSegments, - "http://test.example/%25/?p=%20val%20%25", - false, - }, - &testCase{ - "Standard-8", - "http://test.example/path/with a%20space+/", - FlagsSafe | FlagRemoveDotSegments, - "http://test.example/path/with%20a%20space+/", - false, - }, - &testCase{ - "Standard-9", - "http://test.example/?", - FlagsSafe | FlagRemoveDotSegments, - "http://test.example/", - false, - }, - &testCase{ - "Standard-10", - "http://a.COM/path/?b&a", - FlagsSafe | FlagRemoveDotSegments, - "http://a.com/path/?b&a", - false, - }, - &testCase{ - "StandardCasesAddTrailingSlash", - "http://test.example?", - FlagsSafe | FlagAddTrailingSlash, - "http://test.example/", - false, - }, - &testCase{ - "OctalIP-1", - "http://0123.011.0.4/", - FlagsSafe | FlagDecodeOctalHost, - "http://0123.011.0.4/", - false, - }, - &testCase{ - "OctalIP-2", - "http://0102.0146.07.0223/", - FlagsSafe | FlagDecodeOctalHost, - "http://66.102.7.147/", - false, - }, - &testCase{ - "OctalIP-3", - "http://0102.0146.07.0223.:23/", - FlagsSafe | FlagDecodeOctalHost, - "http://66.102.7.147.:23/", - false, - }, - &testCase{ - "OctalIP-4", - "http://USER:pass@0102.0146.07.0223../", - FlagsSafe | FlagDecodeOctalHost, - "http://USER:pass@66.102.7.147../", - false, - }, - &testCase{ - "DWORDIP-1", - "http://123.1113982867/", - FlagsSafe | FlagDecodeDWORDHost, - "http://123.1113982867/", - false, - }, - &testCase{ - "DWORDIP-2", - "http://1113982867/", - FlagsSafe | FlagDecodeDWORDHost, - "http://66.102.7.147/", - false, - }, - &testCase{ - "DWORDIP-3", - "http://1113982867.:23/", - FlagsSafe | FlagDecodeDWORDHost, - "http://66.102.7.147.:23/", - false, - }, - &testCase{ - "DWORDIP-4", - "http://USER:pass@1113982867../", - FlagsSafe | FlagDecodeDWORDHost, - "http://USER:pass@66.102.7.147../", - false, - }, - &testCase{ - "HexIP-1", - "http://0x123.1113982867/", - FlagsSafe | FlagDecodeHexHost, - "http://0x123.1113982867/", - false, - }, - &testCase{ - "HexIP-2", - "http://0x42660793/", - 
FlagsSafe | FlagDecodeHexHost, - "http://66.102.7.147/", - false, - }, - &testCase{ - "HexIP-3", - "http://0x42660793.:23/", - FlagsSafe | FlagDecodeHexHost, - "http://66.102.7.147.:23/", - false, - }, - &testCase{ - "HexIP-4", - "http://USER:pass@0x42660793../", - FlagsSafe | FlagDecodeHexHost, - "http://USER:pass@66.102.7.147../", - false, - }, - &testCase{ - "UnnecessaryHostDots-1", - "http://.www.foo.com../foo/bar.html", - FlagsSafe | FlagRemoveUnnecessaryHostDots, - "http://www.foo.com/foo/bar.html", - false, - }, - &testCase{ - "UnnecessaryHostDots-2", - "http://www.foo.com./foo/bar.html", - FlagsSafe | FlagRemoveUnnecessaryHostDots, - "http://www.foo.com/foo/bar.html", - false, - }, - &testCase{ - "UnnecessaryHostDots-3", - "http://www.foo.com.:81/foo", - FlagsSafe | FlagRemoveUnnecessaryHostDots, - "http://www.foo.com:81/foo", - false, - }, - &testCase{ - "UnnecessaryHostDots-4", - "http://www.example.com./", - FlagsSafe | FlagRemoveUnnecessaryHostDots, - "http://www.example.com/", - false, - }, - &testCase{ - "EmptyPort-1", - "http://www.thedraymin.co.uk:/main/?p=308", - FlagsSafe | FlagRemoveEmptyPortSeparator, - "http://www.thedraymin.co.uk/main/?p=308", - false, - }, - &testCase{ - "EmptyPort-2", - "http://www.src.ca:", - FlagsSafe | FlagRemoveEmptyPortSeparator, - "http://www.src.ca", - false, - }, - &testCase{ - "Slashes-1", - "http://test.example/foo/bar/.", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/bar/", - false, - }, - &testCase{ - "Slashes-2", - "http://test.example/foo/bar/./", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/bar/", - false, - }, - &testCase{ - "Slashes-3", - "http://test.example/foo/bar/..", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/", - false, - }, - &testCase{ - "Slashes-4", - "http://test.example/foo/bar/../", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/", - false, - }, - &testCase{ - "Slashes-5", - "http://test.example/foo/bar/../baz", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/baz", - false, - }, - &testCase{ - "Slashes-6", - "http://test.example/foo/bar/../..", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/", - false, - }, - &testCase{ - "Slashes-7", - "http://test.example/foo/bar/../../", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/", - false, - }, - &testCase{ - "Slashes-8", - "http://test.example/foo/bar/../../baz", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/baz", - false, - }, - &testCase{ - "Slashes-9", - "http://test.example/foo/bar/../../../baz", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/baz", - false, - }, - &testCase{ - "Slashes-10", - "http://test.example/foo/bar/../../../../baz", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/baz", - false, - }, - &testCase{ - "Slashes-11", - "http://test.example/./foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo", - false, - }, - &testCase{ - "Slashes-12", - "http://test.example/../foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo", - false, - }, - &testCase{ - "Slashes-13", - "http://test.example/foo.", - FlagsSafe | FlagRemoveDotSegments | 
FlagRemoveDuplicateSlashes, - "http://test.example/foo.", - false, - }, - &testCase{ - "Slashes-14", - "http://test.example/.foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/.foo", - false, - }, - &testCase{ - "Slashes-15", - "http://test.example/foo..", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo..", - false, - }, - &testCase{ - "Slashes-16", - "http://test.example/..foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/..foo", - false, - }, - &testCase{ - "Slashes-17", - "http://test.example/./../foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo", - false, - }, - &testCase{ - "Slashes-18", - "http://test.example/./foo/.", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/", - false, - }, - &testCase{ - "Slashes-19", - "http://test.example/foo/./bar", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/bar", - false, - }, - &testCase{ - "Slashes-20", - "http://test.example/foo/../bar", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/bar", - false, - }, - &testCase{ - "Slashes-21", - "http://test.example/foo//", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/", - false, - }, - &testCase{ - "Slashes-22", - "http://test.example/foo///bar//", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "http://test.example/foo/bar/", - false, - }, - &testCase{ - "Relative", - "foo/bar", - FlagsAllGreedy, - "foo/bar", - false, - }, - &testCase{ - "Relative-1", - "./../foo", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "foo", - false, - }, - &testCase{ - "Relative-2", - "./foo/bar/../baz/../bang/..", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "foo/", - false, - }, - &testCase{ - "Relative-3", - "foo///bar//", - FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, - "foo/bar/", - false, - }, - &testCase{ - "Relative-4", - "www.youtube.com", - FlagsUsuallySafeGreedy, - "www.youtube.com", - false, - }, - /*&testCase{ - "UrlNorm-5", - "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3", - FlagsSafe | FlagRemoveDotSegments, - "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3", - false, - }, - &testCase{ - "UrlNorm-1", - "http://test.example/?a=%e3%82%82%26", - FlagsAllGreedy, - "http://test.example/?a=\xe3\x82\x82%26", - false, - },*/ - } -) - -func TestRunner(t *testing.T) { - for _, tc := range cases { - runCase(tc, t) - } -} - -func runCase(tc *testCase, t *testing.T) { - t.Logf("running %s...", tc.nm) - if tc.parsed { - u, e := url.Parse(tc.src) - if e != nil { - t.Errorf("%s - FAIL : %s", tc.nm, e) - return - } else { - NormalizeURL(u, tc.flgs) - if s := u.String(); s != tc.res { - t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s) - } - } - } else { - if s, e := NormalizeURLString(tc.src, tc.flgs); e != nil { - t.Errorf("%s - FAIL : %s", tc.nm, e) - } else if s != tc.res { - t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s) - } - } -} - -func TestDecodeUnnecessaryEscapesAll(t *testing.T) { - var url = "http://host/" - - for i := 0; i < 256; i++ { - url += fmt.Sprintf("%%%02x", i) - } - 
if s, e := NormalizeURLString(url, FlagDecodeUnnecessaryEscapes); e != nil { - t.Fatalf("Got error %s", e.Error()) - } else { - const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22%23$%25&'()*+,-./0123456789:;%3C=%3E%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF" - if s != want { - t.Errorf("DecodeUnnecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s) - } - } -} - -func TestEncodeNecessaryEscapesAll(t *testing.T) { - var url = "http://host/" - - for i := 0; i < 256; i++ { - if i != 0x25 { - url += string(i) - } - } - if s, e := NormalizeURLString(url, FlagEncodeNecessaryEscapes); e != nil { - t.Fatalf("Got error %s", e.Error()) - } else { - const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22#$&'()*+,-./0123456789:;%3C=%3E?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%C2%A0%C2%A1%C2%A2%C2%A3%C2%A4%C2%A5%C2%A6%C2%A7%C2%A8%C2%A9%C2%AA%C2%AB%C2%AC%C2%AD%C2%AE%C2%AF%C2%B0%C2%B1%C2%B2%C2%B3%C2%B4%C2%B5%C2%B6%C2%B7%C2%B8%C2%B9%C2%BA%C2%BB%C2%BC%C2%BD%C2%BE%C2%BF%C3%80%C3%81%C3%82%C3%83%C3%84%C3%85%C3%86%C3%87%C3%88%C3%89%C3%8A%C3%8B%C3%8C%C3%8D%C3%8E%C3%8F%C3%90%C3%91%C3%92%C3%93%C3%94%C3%95%C3%96%C3%97%C3%98%C3%99%C3%9A%C3%9B%C3%9C%C3%9D%C3%9E%C3%9F%C3%A0%C3%A1%C3%A2%C3%A3%C3%A4%C3%A5%C3%A6%C3%A7%C3%A8%C3%A9%C3%AA%C3%AB%C3%AC%C3%AD%C3%AE%C3%AF%C3%B0%C3%B1%C3%B2%C3%B3%C3%B4%C3%B5%C3%B6%C3%B7%C3%B8%C3%B9%C3%BA%C3%BB%C3%BC%C3%BD%C3%BE%C3%BF" - if s != want { - t.Errorf("EncodeNecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s) - } - } -} diff --git a/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go b/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go deleted file mode 100644 index d1b2ca6c0e..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package purell - -import ( - "testing" -) - -// Test cases merged from PR #1 -// Originally from https://github.com/jehiah/urlnorm/blob/master/test_urlnorm.py - -func assertMap(t *testing.T, cases map[string]string, f NormalizationFlags) { - for bad, good := range cases { - s, e := NormalizeURLString(bad, f) - if e != nil { - t.Errorf("%s normalizing %v to %v", e.Error(), bad, good) - } else { - if s != good { - t.Errorf("source: %v expected: %v got: %v", bad, good, s) - } - } - } -} - -// This tests normalization to a unicode representation -// precent escapes for unreserved values are unescaped to their unicode value -// tests normalization to idna domains -// test ip word handling, ipv6 address handling, and trailing domain periods -// in general, this matches google chromes unescaping for things in the address bar. 
-// spaces are converted to '+' (perhaphs controversial) -// http://code.google.com/p/google-url/ probably is another good reference for this approach -func TestUrlnorm(t *testing.T) { - testcases := map[string]string{ - "http://test.example/?a=%e3%82%82%26": "http://test.example/?a=%e3%82%82%26", - //"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=\xe3\x82\x82%26", //should return a unicode character - "http://s.xn--q-bga.DE/": "http://s.xn--q-bga.de/", //should be in idna format - "http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode - "http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai", - "http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai", - "http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding - "http://\u00e9.com": "http://xn--9ca.com", - "http://e\u0301.com": "http://xn--9ca.com", - "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3", - //"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3", - - "http://test.example/\xe3\x82\xad": "http://test.example/%E3%82%AD", - //"http://test.example/\xe3\x82\xad": "http://test.example/\xe3\x82\xad", - "http://test.example/?p=%23val#test-%23-val%25": "http://test.example/?p=%23val#test-%23-val%25", //check that %23 (#) is not escaped where it shouldn't be - - "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n", - //"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xef\xbf\xbdliz\xc3\xa6ti\xc3\xb8n", - } - - assertMap(t, testcases, FlagsSafe|FlagRemoveDotSegments) -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go deleted file mode 100644 index 45202e1dd8..0000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package urlesc - -import ( - "net/url" - "testing" -) - -type URLTest struct { - in string - out *url.URL - roundtrip string // expected result of reserializing the URL; empty means same as "in". 
-} - -var urltests = []URLTest{ - // no path - { - "http://www.google.com", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - }, - "", - }, - // path - { - "http://www.google.com/", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/", - }, - "", - }, - // path with hex escaping - { - "http://www.google.com/file%20one%26two", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/file one&two", - }, - "http://www.google.com/file%20one&two", - }, - // user - { - "ftp://webmaster@www.google.com/", - &url.URL{ - Scheme: "ftp", - User: url.User("webmaster"), - Host: "www.google.com", - Path: "/", - }, - "", - }, - // escape sequence in username - { - "ftp://john%20doe@www.google.com/", - &url.URL{ - Scheme: "ftp", - User: url.User("john doe"), - Host: "www.google.com", - Path: "/", - }, - "ftp://john%20doe@www.google.com/", - }, - // query - { - "http://www.google.com/?q=go+language", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/", - RawQuery: "q=go+language", - }, - "", - }, - // query with hex escaping: NOT parsed - { - "http://www.google.com/?q=go%20language", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/", - RawQuery: "q=go%20language", - }, - "", - }, - // %20 outside query - { - "http://www.google.com/a%20b?q=c+d", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/a b", - RawQuery: "q=c+d", - }, - "", - }, - // path without leading /, so no parsing - { - "http:www.google.com/?q=go+language", - &url.URL{ - Scheme: "http", - Opaque: "www.google.com/", - RawQuery: "q=go+language", - }, - "http:www.google.com/?q=go+language", - }, - // path without leading /, so no parsing - { - "http:%2f%2fwww.google.com/?q=go+language", - &url.URL{ - Scheme: "http", - Opaque: "%2f%2fwww.google.com/", - RawQuery: "q=go+language", - }, - "http:%2f%2fwww.google.com/?q=go+language", - }, - // non-authority with path - { - "mailto:/webmaster@golang.org", - &url.URL{ - Scheme: "mailto", - Path: "/webmaster@golang.org", - }, - "mailto:///webmaster@golang.org", // unfortunate compromise - }, - // non-authority - { - "mailto:webmaster@golang.org", - &url.URL{ - Scheme: "mailto", - Opaque: "webmaster@golang.org", - }, - "", - }, - // unescaped :// in query should not create a scheme - { - "/foo?query=http://bad", - &url.URL{ - Path: "/foo", - RawQuery: "query=http://bad", - }, - "", - }, - // leading // without scheme should create an authority - { - "//foo", - &url.URL{ - Host: "foo", - }, - "", - }, - // leading // without scheme, with userinfo, path, and query - { - "//user@foo/path?a=b", - &url.URL{ - User: url.User("user"), - Host: "foo", - Path: "/path", - RawQuery: "a=b", - }, - "", - }, - // Three leading slashes isn't an authority, but doesn't return an error. 
- // (We can't return an error, as this code is also used via - // ServeHTTP -> ReadRequest -> Parse, which is arguably a - // different URL parsing context, but currently shares the - // same codepath) - { - "///threeslashes", - &url.URL{ - Path: "///threeslashes", - }, - "", - }, - { - "http://user:password@google.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword("user", "password"), - Host: "google.com", - }, - "http://user:password@google.com", - }, - // unescaped @ in username should not confuse host - { - "http://j@ne:password@google.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword("j@ne", "password"), - Host: "google.com", - }, - "http://j%40ne:password@google.com", - }, - // unescaped @ in password should not confuse host - { - "http://jane:p@ssword@google.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword("jane", "p@ssword"), - Host: "google.com", - }, - "http://jane:p%40ssword@google.com", - }, - { - "http://j@ne:password@google.com/p@th?q=@go", - &url.URL{ - Scheme: "http", - User: url.UserPassword("j@ne", "password"), - Host: "google.com", - Path: "/p@th", - RawQuery: "q=@go", - }, - "http://j%40ne:password@google.com/p@th?q=@go", - }, - { - "http://www.google.com/?q=go+language#foo", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/", - RawQuery: "q=go+language", - Fragment: "foo", - }, - "", - }, - { - "http://www.google.com/?q=go+language#foo%26bar", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/", - RawQuery: "q=go+language", - Fragment: "foo&bar", - }, - "http://www.google.com/?q=go+language#foo&bar", - }, - { - "file:///home/adg/rabbits", - &url.URL{ - Scheme: "file", - Host: "", - Path: "/home/adg/rabbits", - }, - "file:///home/adg/rabbits", - }, - // "Windows" paths are no exception to the rule. - // See golang.org/issue/6027, especially comment #9. - { - "file:///C:/FooBar/Baz.txt", - &url.URL{ - Scheme: "file", - Host: "", - Path: "/C:/FooBar/Baz.txt", - }, - "file:///C:/FooBar/Baz.txt", - }, - // case-insensitive scheme - { - "MaIlTo:webmaster@golang.org", - &url.URL{ - Scheme: "mailto", - Opaque: "webmaster@golang.org", - }, - "mailto:webmaster@golang.org", - }, - // Relative path - { - "a/b/c", - &url.URL{ - Path: "a/b/c", - }, - "a/b/c", - }, - // escaped '?' in username and password - { - "http://%3Fam:pa%3Fsword@google.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword("?am", "pa?sword"), - Host: "google.com", - }, - "", - }, - // escaped '?' and '#' in path - { - "http://example.com/%3F%23", - &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "?#", - }, - "", - }, - // unescaped [ ] ! ' ( ) * in path - { - "http://example.com/[]!'()*", - &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "[]!'()*", - }, - "http://example.com/[]!'()*", - }, - // escaped : / ? # [ ] @ in username and password - { - "http://%3A%2F%3F:%23%5B%5D%40@example.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword(":/?", "#[]@"), - Host: "example.com", - }, - "", - }, - // unescaped ! $ & ' ( ) * + , ; = in username and password - { - "http://!$&'():*+,;=@example.com", - &url.URL{ - Scheme: "http", - User: url.UserPassword("!$&'()", "*+,;="), - Host: "example.com", - }, - "", - }, - // unescaped = : / . ? = in query component - { - "http://example.com/?q=http://google.com/?q=", - &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/", - RawQuery: "q=http://google.com/?q=", - }, - "", - }, - // unescaped : / ? [ ] @ ! 
$ & ' ( ) * + , ; = in fragment - { - "http://example.com/#:/?%23[]@!$&'()*+,;=", - &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/", - Fragment: ":/?#[]@!$&'()*+,;=", - }, - "", - }, -} - -func DoTestString(t *testing.T, parse func(string) (*url.URL, error), name string, tests []URLTest) { - for _, tt := range tests { - u, err := parse(tt.in) - if err != nil { - t.Errorf("%s(%q) returned error %s", name, tt.in, err) - continue - } - expected := tt.in - if len(tt.roundtrip) > 0 { - expected = tt.roundtrip - } - s := Escape(u) - if s != expected { - t.Errorf("Escape(%s(%q)) == %q (expected %q)", name, tt.in, s, expected) - } - } -} - -func TestURLString(t *testing.T) { - DoTestString(t, url.Parse, "Parse", urltests) - - // no leading slash on path should prepend - // slash on String() call - noslash := URLTest{ - "http://www.google.com/search", - &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "search", - }, - "", - } - s := Escape(noslash.out) - if s != noslash.in { - t.Errorf("Expected %s; go %s", noslash.in, s) - } -} - -type EscapeTest struct { - in string - out string - err error -} - -var escapeTests = []EscapeTest{ - { - "", - "", - nil, - }, - { - "abc", - "abc", - nil, - }, - { - "one two", - "one+two", - nil, - }, - { - "10%", - "10%25", - nil, - }, - { - " ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;", - "+?%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A/%40%24%27%28%29%2A%2C%3B", - nil, - }, -} - -func TestEscape(t *testing.T) { - for _, tt := range escapeTests { - actual := QueryEscape(tt.in) - if tt.out != actual { - t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out) - } - - // for bonus points, verify that escape:unescape is an identity. - roundtrip, err := url.QueryUnescape(actual) - if roundtrip != tt.in || err != nil { - t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]") - } - } -} - -var resolveReferenceTests = []struct { - base, rel, expected string -}{ - // Absolute URL references - {"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"}, - {"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"}, - {"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"}, - - // Path-absolute references - {"http://foo.com/bar", "/baz", "http://foo.com/baz"}, - {"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"}, - {"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"}, - - // Scheme-relative - {"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"}, - - // Path-relative references: - - // ... current directory - {"http://foo.com", ".", "http://foo.com/"}, - {"http://foo.com/bar", ".", "http://foo.com/"}, - {"http://foo.com/bar/", ".", "http://foo.com/bar/"}, - - // ... going down - {"http://foo.com", "bar", "http://foo.com/bar"}, - {"http://foo.com/", "bar", "http://foo.com/bar"}, - {"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"}, - - // ... going up - {"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"}, - {"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"}, - {"http://foo.com/bar", "..", "http://foo.com/"}, - {"http://foo.com/bar/baz", "./..", "http://foo.com/"}, - // ".." 
in the middle (issue 3560) - {"http://foo.com/bar/baz", "quux/dotdot/../tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/../tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/.././tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/./../tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/././../../tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/./.././../tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/dotdot/./../../.././././tail", "http://foo.com/bar/quux/tail"}, - {"http://foo.com/bar/baz", "quux/./dotdot/../dotdot/../dot/./tail/..", "http://foo.com/bar/quux/dot/"}, - - // Remove any dot-segments prior to forming the target URI. - // http://tools.ietf.org/html/rfc3986#section-5.2.4 - {"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/baz"}, - - // Triple dot isn't special - {"http://foo.com/bar", "...", "http://foo.com/..."}, - - // Fragment - {"http://foo.com/bar", ".#frag", "http://foo.com/#frag"}, - - // RFC 3986: Normal Examples - // http://tools.ietf.org/html/rfc3986#section-5.4.1 - {"http://a/b/c/d;p?q", "g:h", "g:h"}, - {"http://a/b/c/d;p?q", "g", "http://a/b/c/g"}, - {"http://a/b/c/d;p?q", "./g", "http://a/b/c/g"}, - {"http://a/b/c/d;p?q", "g/", "http://a/b/c/g/"}, - {"http://a/b/c/d;p?q", "/g", "http://a/g"}, - {"http://a/b/c/d;p?q", "//g", "http://g"}, - {"http://a/b/c/d;p?q", "?y", "http://a/b/c/d;p?y"}, - {"http://a/b/c/d;p?q", "g?y", "http://a/b/c/g?y"}, - {"http://a/b/c/d;p?q", "#s", "http://a/b/c/d;p?q#s"}, - {"http://a/b/c/d;p?q", "g#s", "http://a/b/c/g#s"}, - {"http://a/b/c/d;p?q", "g?y#s", "http://a/b/c/g?y#s"}, - {"http://a/b/c/d;p?q", ";x", "http://a/b/c/;x"}, - {"http://a/b/c/d;p?q", "g;x", "http://a/b/c/g;x"}, - {"http://a/b/c/d;p?q", "g;x?y#s", "http://a/b/c/g;x?y#s"}, - {"http://a/b/c/d;p?q", "", "http://a/b/c/d;p?q"}, - {"http://a/b/c/d;p?q", ".", "http://a/b/c/"}, - {"http://a/b/c/d;p?q", "./", "http://a/b/c/"}, - {"http://a/b/c/d;p?q", "..", "http://a/b/"}, - {"http://a/b/c/d;p?q", "../", "http://a/b/"}, - {"http://a/b/c/d;p?q", "../g", "http://a/b/g"}, - {"http://a/b/c/d;p?q", "../..", "http://a/"}, - {"http://a/b/c/d;p?q", "../../", "http://a/"}, - {"http://a/b/c/d;p?q", "../../g", "http://a/g"}, - - // RFC 3986: Abnormal Examples - // http://tools.ietf.org/html/rfc3986#section-5.4.2 - {"http://a/b/c/d;p?q", "../../../g", "http://a/g"}, - {"http://a/b/c/d;p?q", "../../../../g", "http://a/g"}, - {"http://a/b/c/d;p?q", "/./g", "http://a/g"}, - {"http://a/b/c/d;p?q", "/../g", "http://a/g"}, - {"http://a/b/c/d;p?q", "g.", "http://a/b/c/g."}, - {"http://a/b/c/d;p?q", ".g", "http://a/b/c/.g"}, - {"http://a/b/c/d;p?q", "g..", "http://a/b/c/g.."}, - {"http://a/b/c/d;p?q", "..g", "http://a/b/c/..g"}, - {"http://a/b/c/d;p?q", "./../g", "http://a/b/g"}, - {"http://a/b/c/d;p?q", "./g/.", "http://a/b/c/g/"}, - {"http://a/b/c/d;p?q", "g/./h", "http://a/b/c/g/h"}, - {"http://a/b/c/d;p?q", "g/../h", "http://a/b/c/h"}, - {"http://a/b/c/d;p?q", "g;x=1/./y", "http://a/b/c/g;x=1/y"}, - {"http://a/b/c/d;p?q", "g;x=1/../y", "http://a/b/c/y"}, - {"http://a/b/c/d;p?q", "g?y/./x", "http://a/b/c/g?y/./x"}, - {"http://a/b/c/d;p?q", "g?y/../x", "http://a/b/c/g?y/../x"}, - {"http://a/b/c/d;p?q", "g#s/./x", "http://a/b/c/g#s/./x"}, - {"http://a/b/c/d;p?q", "g#s/../x", "http://a/b/c/g#s/../x"}, - - // Extras. 
- {"https://a/b/c/d;p?q", "//g?q", "https://g?q"}, - {"https://a/b/c/d;p?q", "//g#s", "https://g#s"}, - {"https://a/b/c/d;p?q", "//g/d/e/f?y#s", "https://g/d/e/f?y#s"}, - {"https://a/b/c/d;p#s", "?y", "https://a/b/c/d;p?y"}, - {"https://a/b/c/d;p?q#s", "?y", "https://a/b/c/d;p?y"}, -} - -func TestResolveReference(t *testing.T) { - mustParse := func(url_ string) *url.URL { - u, err := url.Parse(url_) - if err != nil { - t.Fatalf("Expected URL to parse: %q, got error: %v", url_, err) - } - return u - } - opaque := &url.URL{Scheme: "scheme", Opaque: "opaque"} - for _, test := range resolveReferenceTests { - base := mustParse(test.base) - rel := mustParse(test.rel) - url := base.ResolveReference(rel) - if Escape(url) != test.expected { - t.Errorf("URL(%q).ResolveReference(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url)) - } - // Ensure that new instances are returned. - if base == url { - t.Errorf("Expected URL.ResolveReference to return new URL instance.") - } - // Test the convenience wrapper too. - url, err := base.Parse(test.rel) - if err != nil { - t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err) - } else if Escape(url) != test.expected { - t.Errorf("URL(%q).Parse(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url)) - } else if base == url { - // Ensure that new instances are returned for the wrapper too. - t.Errorf("Expected URL.Parse to return new URL instance.") - } - // Ensure Opaque resets the URL. - url = base.ResolveReference(opaque) - if *url != *opaque { - t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", url, opaque) - } - // Test the convenience wrapper with an opaque URL too. - url, err = base.Parse("scheme:opaque") - if err != nil { - t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err) - } else if *url != *opaque { - t.Errorf("Parse failed to resolve opaque URL: want %#v, got %#v", url, opaque) - } else if base == url { - // Ensure that new instances are returned, again. 
- t.Errorf("Expected URL.Parse to return new URL instance.") - } - } -} - -type shouldEscapeTest struct { - in byte - mode encoding - escape bool -} - -var shouldEscapeTests = []shouldEscapeTest{ - // Unreserved characters (§2.3) - {'a', encodePath, false}, - {'a', encodeUserPassword, false}, - {'a', encodeQueryComponent, false}, - {'a', encodeFragment, false}, - {'z', encodePath, false}, - {'A', encodePath, false}, - {'Z', encodePath, false}, - {'0', encodePath, false}, - {'9', encodePath, false}, - {'-', encodePath, false}, - {'-', encodeUserPassword, false}, - {'-', encodeQueryComponent, false}, - {'-', encodeFragment, false}, - {'.', encodePath, false}, - {'_', encodePath, false}, - {'~', encodePath, false}, - - // User information (§3.2.1) - {':', encodeUserPassword, true}, - {'/', encodeUserPassword, true}, - {'?', encodeUserPassword, true}, - {'@', encodeUserPassword, true}, - {'$', encodeUserPassword, false}, - {'&', encodeUserPassword, false}, - {'+', encodeUserPassword, false}, - {',', encodeUserPassword, false}, - {';', encodeUserPassword, false}, - {'=', encodeUserPassword, false}, -} - -func TestShouldEscape(t *testing.T) { - for _, tt := range shouldEscapeTests { - if shouldEscape(tt.in, tt.mode) != tt.escape { - t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape) - } - } -} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..6b7d7d1e8b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1,2 @@ +logrus +vendor diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..7e54dc6e3b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,21 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.10.x, 1.11.x, 1.12.x ] +os: [ linux, osx, windows ] +matrix: + exclude: + - env: GO111MODULE=on + go: 1.10.x +install: + - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi + - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi +script: + - export GOMAXPROCS=4 + - export GORACE=halt_on_error=1 + - go test -race -v ./... + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000000..f62cbd24a7 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,198 @@ +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). 
+
+Fixes:
+ * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+ * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+ * Fix infinite recursion on unknown `Level.String()` (#907)
+ * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+ * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+ * Building prometheus node_exporter on AIX (#840)
+ * Race condition in TextFormatter (#468)
+ * Travis CI import path (#868)
+ * Remove coloured output on Windows (#862)
+ * Pointer to func as field in JSONFormatter (#870)
+ * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+ * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+ * A new trace level named `Trace` whose level is below `Debug`
+ * A configurable exit function to be called upon a Fatal trace
+ * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+   * a fix for a race condition on entry formatting
+   * proper cleanup of previously used entries before putting them back in the pool
+   * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+   * IsLevelEnabled
+   * SetFormatter
+   * SetOutput
+   * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API WithTime which makes it easy to force the time of the log entry,
+   which is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the JSON formatter in order to put all the fields
+   in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the textformatter to configure the name of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* 
performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000000..f090cb42f3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
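The changelog above names several public APIs without showing them in use. Purely as an illustrative sketch (assuming the logrus v1.4.x API vendored in this change, with package-level `SetReportCaller`, `IsLevelEnabled`, `WithTime` and `DeferExitHandler`), they compose like this:

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// 1.2.0: report the file, line and function of the call site.
	log.SetReportCaller(true)

	// 1.2.0: Trace is a new level below Debug.
	log.SetLevel(log.TraceLevel)

	// 1.1.0: check whether a level is active before doing expensive work.
	if log.IsLevelEnabled(log.DebugLevel) {
		log.Debug("debug logging is enabled")
	}

	// 1.0.6: force the timestamp of an entry (useful in logger wrappers).
	log.WithTime(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)).Info("backdated entry")

	// 1.4.0: DeferExitHandler prepends a handler, so handlers run
	// LIFO (like defer) before os.Exit on any Fatal log.
	log.DeferExitHandler(func() { log.Info("shutting down") })
}
```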
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000000..a4796eb07d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,495 @@
+# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and a standard was needed. Some environments
+experienced problems with the upper-case variant, so the lower-case form was chosen.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!"
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. 
+  log.Out = os.Stdout
+
+  // You could set this to any `io.Writer` such as a file
+  // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+  // if err == nil {
+  //   log.Out = file
+  // } else {
+  //   log.Info("Failed to log to file, using default stderr")
+  // }
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
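Each of these levels has a corresponding method, demonstrated in the example below. As an additional sketch (not part of the vendored README; `LOG_LEVEL` is a hypothetical variable name), a level can also be parsed from a configuration string with `logrus.ParseLevel`, which the changelog notes is case-insensitive:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// ParseLevel accepts "trace", "debug", "WARN", etc., case-insensitively.
	lvl, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		lvl = logrus.InfoLevel // fall back to the default level
	}
	logrus.SetLevel(lvl)
	logrus.Debug("only emitted when LOG_LEVEL is debug or trace")
}
```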
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+
+You can define your own formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` for information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with a different config for each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! To simplify Logrus configuration it reuses some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages.
This is implemented through the `test` hook and provides:

+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+  "testing"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Helloerror")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000000..8fd189e1cc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+	handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000000..96c2ce15f8
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+  GOPATH: c:\gopath
+branches:
+  only:
+    - master
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+build_script:
+  - go get -t
+  - go test
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000000..da67aba06d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000000..63e25583cb --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,407 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. 
+func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. + if entry.Time.IsZero() { + entry.Time = time.Now() + } + + entry.Level = level + entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) 
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	entry.Logln(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	entry.Logln(PanicLevel, args...)
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000000..62fc2f2193
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,225 @@
+package logrus
+
+import (
+	"context"
+	"io"
+	"time"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+	return std.WithContext(ctx)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
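A short sketch of package-level WithTime in use: it pins the entry's timestamp, whereas entry.log() otherwise falls back to time.Now(). The fixed timestamp value here is arbitrary:

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Useful when replaying buffered or imported events whose original
	// time should be preserved in the log output.
	eventTime := time.Date(2019, 1, 2, 3, 4, 5, 0, time.UTC)
	log.WithTime(eventTime).Info("replayed event")
}
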
+func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) 
+} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..408883773e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. 
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod new file mode 100644 index 0000000000..8261a2b3a2 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.mod @@ -0,0 +1,10 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 +) diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum new file mode 100644 index 0000000000..2d787be60d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..3f151cdc39 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. 
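A Hook implementation only needs the two methods named above. A sketch, where errorCounter is a hypothetical hook that counts error-and-worse entries; since Fire runs synchronously under the logger's mutex, the bare counter is safe here:

package main

import (
	log "github.com/sirupsen/logrus"
)

// errorCounter fires only for ErrorLevel and above.
type errorCounter struct{ count int }

func (h *errorCounter) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func (h *errorCounter) Fire(entry *log.Entry) error {
	h.count++
	return nil
}

func main() {
	hook := &errorCounter{}
	log.AddHook(hook)
	log.Error("something failed")
	log.Infof("errors observed: %d", hook.count)
}
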
+func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..098a21a067 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,121 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
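The FieldMap and DataKey options documented above combine as follows. A sketch with invented key names (@timestamp, @level, @message, data); any other names work the same way:

package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&log.JSONFormatter{
		// Rename the default keys and nest user-supplied fields under "data".
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
		DataKey: "data",
	})
	logger.WithField("animal", "walrus").Info("a walrus appears")
}
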
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000000..c0c0b1e559 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,351 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. 
Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) 
+} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. 
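One consequence of the exported ExitFunc field on Logger, which defaults to os.Exit: a test can intercept Fatal instead of letting it kill the process. A sketch; the captured variable is illustrative:

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Fatal still logs and still calls logger.Exit(1), but the swapped-in
	// ExitFunc merely records the code instead of terminating.
	var captured int
	logger.ExitFunc = func(code int) { captured = code }

	logger.Fatal("fatal, but the process survives")
	logger.Infof("exit code captured: %d", captured)
}
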
+func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..8644761f73 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. 
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
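Rounding out the Level machinery above, a sketch of ParseLevel paired with SetLevel; the chosen level name is arbitrary. ParseLevel is effectively the inverse of Level.MarshalText, and both "warn" and "warning" parse to WarnLevel:

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Unknown names return an error rather than a zero Level.
	level, err := log.ParseLevel("warning")
	if err != nil {
		log.Fatal(err)
	}
	log.SetLevel(level)

	log.Debug("suppressed: below the configured level")
	log.Warn("emitted")
}
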
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000..2403de9819 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 0000000000..3c4f43f91c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go new file mode 100644 index 0000000000..0c209750a3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,11 @@ +// +build js + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000..7be2d87c59 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 0000000000..355dc966f0 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 0000000000..3b9d2864ca --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,20 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000000..3dbd237203 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,8 @@ +// +build 
!windows + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000000..b4ef5286cd --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,18 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..1569161eb8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,299 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
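A sketch of the CallerPrettyfier option on TextFormatter, assuming one possible trimming strategy (keep only the last path element of the function name); returning an empty file string removes the file key from the output, as the documentation above notes:

package main

import (
	"os"
	"runtime"
	"strings"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetOutput(os.Stdout)
	logger.SetReportCaller(true)
	logger.SetFormatter(&log.TextFormatter{
		FullTimestamp: true,
		// Trim the fully qualified function name; drop the file key.
		CallerPrettyfier: func(f *runtime.Frame) (string, string) {
			function := f.Function[strings.LastIndex(f.Function, "/")+1:]
			return function, ""
		},
	})
	logger.Info("caller attached")
}
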
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + + if f.isTerminal { + initTerminal(entry.Logger.Out) + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000000..9e1f751359 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,64 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/armon/go-proxyproto/protocol.go b/vendor/github.com/armon/go-proxyproto/protocol.go index 576507294f..9df25e9a05 100644 --- a/vendor/github.com/armon/go-proxyproto/protocol.go +++ b/vendor/github.com/armon/go-proxyproto/protocol.go @@ -28,12 +28,13 @@ var ( // passed in as an argument. If the function returns an error due to the source // being disallowed, it should return ErrInvalidUpstream. // -// Behavior is as follows: -// * If error is not nil, the call to Accept() will fail. If the reason for +// If error is not nil, the call to Accept() will fail. If the reason for // triggering this failure is due to a disallowed source, it should return // ErrInvalidUpstream. -// * If bool is true, the PROXY-set address is used. -// * If bool is false, the connection's remote address is used, rather than the +// +// If bool is true, the PROXY-set address is used. +// +// If bool is false, the connection's remote address is used, rather than the // address claimed in the PROXY info. 
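A sketch of SourceChecker in use, assuming the package's canonical import path github.com/armon/go-proxyproto; the loopback-only policy and the self-dialing goroutine are illustrative, and the header string matches the format exercised by the tests removed below:

package main

import (
	"log"
	"net"

	proxyproto "github.com/armon/go-proxyproto"
)

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	// Trust PROXY headers only from loopback sources; for anything else the
	// connection's real remote address is used instead of the claimed one.
	pl := &proxyproto.Listener{
		Listener: inner,
		SourceCheck: func(addr net.Addr) (bool, error) {
			return addr.(*net.TCPAddr).IP.IsLoopback(), nil
		},
	}

	go func() {
		// Self-dial with a v1 header so the example terminates on its own.
		conn, err := net.Dial("tcp", pl.Addr().String())
		if err != nil {
			log.Fatal(err)
		}
		conn.Write([]byte("PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\nping"))
		conn.Close()
	}()

	conn, err := pl.Accept()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("reported remote address: %s", conn.RemoteAddr())
}
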
type SourceChecker func(net.Addr) (bool, error) @@ -58,7 +59,7 @@ type Conn struct { conn net.Conn dstAddr *net.TCPAddr srcAddr *net.TCPAddr - useConnRemoteAddr bool + useConnAddr bool once sync.Once proxyHeaderTimeout time.Duration } @@ -70,18 +71,18 @@ func (p *Listener) Accept() (net.Conn, error) { if err != nil { return nil, err } - var useConnRemoteAddr bool + var useConnAddr bool if p.SourceCheck != nil { allowed, err := p.SourceCheck(conn.RemoteAddr()) if err != nil { return nil, err } if !allowed { - useConnRemoteAddr = true + useConnAddr = true } } newConn := NewConn(conn, p.ProxyHeaderTimeout) - newConn.useConnRemoteAddr = useConnRemoteAddr + newConn.useConnAddr = useConnAddr return newConn, nil } @@ -127,6 +128,10 @@ func (p *Conn) Close() error { } func (p *Conn) LocalAddr() net.Addr { + p.checkPrefixOnce() + if p.dstAddr != nil && !p.useConnAddr { + return p.dstAddr + } return p.conn.LocalAddr() } @@ -138,14 +143,8 @@ func (p *Conn) LocalAddr() net.Addr { // client is slow. Using a Deadline is recommended if this is called // before Read() func (p *Conn) RemoteAddr() net.Addr { - p.once.Do(func() { - if err := p.checkPrefix(); err != nil && err != io.EOF { - log.Printf("[ERR] Failed to read proxy prefix: %v", err) - p.Close() - p.bufReader = bufio.NewReader(p.conn) - } - }) - if p.srcAddr != nil && !p.useConnRemoteAddr { + p.checkPrefixOnce() + if p.srcAddr != nil && !p.useConnAddr { return p.srcAddr } return p.conn.RemoteAddr() @@ -163,6 +162,16 @@ func (p *Conn) SetWriteDeadline(t time.Time) error { return p.conn.SetWriteDeadline(t) } +func (p *Conn) checkPrefixOnce() { + p.once.Do(func() { + if err := p.checkPrefix(); err != nil && err != io.EOF { + log.Printf("[ERR] Failed to read proxy prefix: %v", err) + p.Close() + p.bufReader = bufio.NewReader(p.conn) + } + }) +} + func (p *Conn) checkPrefix() error { if p.proxyHeaderTimeout != 0 { readDeadLine := time.Now().Add(p.proxyHeaderTimeout) diff --git a/vendor/github.com/armon/go-proxyproto/protocol_test.go b/vendor/github.com/armon/go-proxyproto/protocol_test.go deleted file mode 100644 index 04a9bc8593..0000000000 --- a/vendor/github.com/armon/go-proxyproto/protocol_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package proxyproto - -import ( - "bytes" - "net" - "testing" - "time" -) - -const ( - goodAddr = "127.0.0.1" - badAddr = "127.0.0.2" - errAddr = "9999.0.0.2" -) - -var ( - checkAddr string -) - -func TestPassthrough(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - pl := &Listener{Listener: l} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - conn.Write([]byte("ping")) - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("pong")) { - t.Fatalf("bad: %v", recv) - } - }() - - conn, err := pl.Accept() - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("ping")) { - t.Fatalf("bad: %v", recv) - } - - if _, err := conn.Write([]byte("pong")); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestTimeout(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - clientWriteDelay := 200 * time.Millisecond - proxyHeaderTimeout := 50 * time.Millisecond - pl := 
&Listener{Listener: l, ProxyHeaderTimeout: proxyHeaderTimeout} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Do not send data for a while - time.Sleep(clientWriteDelay) - - conn.Write([]byte("ping")) - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("pong")) { - t.Fatalf("bad: %v", recv) - } - }() - - conn, err := pl.Accept() - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Check the remote addr is the original 127.0.0.1 - remoteAddrStartTime := time.Now() - addr := conn.RemoteAddr().(*net.TCPAddr) - if addr.IP.String() != "127.0.0.1" { - t.Fatalf("bad: %v", addr) - } - remoteAddrDuration := time.Since(remoteAddrStartTime) - - // Check RemoteAddr() call did timeout - if remoteAddrDuration >= clientWriteDelay { - t.Fatalf("RemoteAddr() took longer than the specified timeout: %v < %v", proxyHeaderTimeout, remoteAddrDuration) - } - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("ping")) { - t.Fatalf("bad: %v", recv) - } - - if _, err := conn.Write([]byte("pong")); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestParse_ipv4(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - pl := &Listener{Listener: l} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Write out the header! - header := "PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\n" - conn.Write([]byte(header)) - - conn.Write([]byte("ping")) - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("pong")) { - t.Fatalf("bad: %v", recv) - } - }() - - conn, err := pl.Accept() - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("ping")) { - t.Fatalf("bad: %v", recv) - } - - if _, err := conn.Write([]byte("pong")); err != nil { - t.Fatalf("err: %v", err) - } - - // Check the remote addr - addr := conn.RemoteAddr().(*net.TCPAddr) - if addr.IP.String() != "10.1.1.1" { - t.Fatalf("bad: %v", addr) - } - if addr.Port != 1000 { - t.Fatalf("bad: %v", addr) - } -} - -func TestParse_ipv6(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - pl := &Listener{Listener: l} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Write out the header! 
- header := "PROXY TCP6 ffff::ffff ffff::ffff 1000 2000\r\n" - conn.Write([]byte(header)) - - conn.Write([]byte("ping")) - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("pong")) { - t.Fatalf("bad: %v", recv) - } - }() - - conn, err := pl.Accept() - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("ping")) { - t.Fatalf("bad: %v", recv) - } - - if _, err := conn.Write([]byte("pong")); err != nil { - t.Fatalf("err: %v", err) - } - - // Check the remote addr - addr := conn.RemoteAddr().(*net.TCPAddr) - if addr.IP.String() != "ffff::ffff" { - t.Fatalf("bad: %v", addr) - } - if addr.Port != 1000 { - t.Fatalf("bad: %v", addr) - } -} - -func TestParse_BadHeader(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - pl := &Listener{Listener: l} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Write out the header! - header := "PROXY TCP4 what 127.0.0.1 1000 2000\r\n" - conn.Write([]byte(header)) - - conn.Write([]byte("ping")) - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err == nil { - t.Fatalf("err: %v", err) - } - }() - - conn, err := pl.Accept() - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Check the remote addr, should be the local addr - addr := conn.RemoteAddr().(*net.TCPAddr) - if addr.IP.String() != "127.0.0.1" { - t.Fatalf("bad: %v", addr) - } - - // Read should fail - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err == nil { - t.Fatalf("err: %v", err) - } -} - -func TestParse_ipv4_checkfunc(t *testing.T) { - checkAddr = goodAddr - testParse_ipv4_checkfunc(t) - checkAddr = badAddr - testParse_ipv4_checkfunc(t) - checkAddr = errAddr - testParse_ipv4_checkfunc(t) -} - -func testParse_ipv4_checkfunc(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %v", err) - } - - checkFunc := func(addr net.Addr) (bool, error) { - tcpAddr := addr.(*net.TCPAddr) - if tcpAddr.IP.String() == checkAddr { - return true, nil - } - return false, nil - } - - pl := &Listener{Listener: l, SourceCheck: checkFunc} - - go func() { - conn, err := net.Dial("tcp", pl.Addr().String()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer conn.Close() - - // Write out the header! 
- header := "PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\n" - conn.Write([]byte(header)) - - conn.Write([]byte("ping")) - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("pong")) { - t.Fatalf("bad: %v", recv) - } - }() - - conn, err := pl.Accept() - if err != nil { - if checkAddr == badAddr { - return - } - t.Fatalf("err: %v", err) - } - defer conn.Close() - - recv := make([]byte, 4) - _, err = conn.Read(recv) - if err != nil { - t.Fatalf("err: %v", err) - } - if !bytes.Equal(recv, []byte("ping")) { - t.Fatalf("bad: %v", recv) - } - - if _, err := conn.Write([]byte("pong")); err != nil { - t.Fatalf("err: %v", err) - } - - // Check the remote addr - addr := conn.RemoteAddr().(*net.TCPAddr) - switch checkAddr { - case goodAddr: - if addr.IP.String() != "10.1.1.1" { - t.Fatalf("bad: %v", addr) - } - if addr.Port != 1000 { - t.Fatalf("bad: %v", addr) - } - case badAddr: - if addr.IP.String() != "127.0.0.1" { - t.Fatalf("bad: %v", addr) - } - if addr.Port == 1000 { - t.Fatalf("bad: %v", addr) - } - } -} diff --git a/vendor/github.com/beorn7/perks/.gitignore b/vendor/github.com/beorn7/perks/.gitignore deleted file mode 100644 index 1bd9209aa1..0000000000 --- a/vendor/github.com/beorn7/perks/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -*.prof diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md deleted file mode 100644 index fc05777701..0000000000 --- a/vendor/github.com/beorn7/perks/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Perks for Go (golang.org) - -Perks contains the Go package quantile that computes approximate quantiles over -an unbounded data stream within low memory and CPU bounds. - -For more information and examples, see: -http://godoc.org/github.com/bmizerany/perks - -A very special thank you and shout out to Graham Cormode (Rutgers University), -Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and -Divesh Srivastava (AT&T Labs–Research) for their research and publication of -[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) - -Thank you, also: -* Armon Dadgar (@armon) -* Andrew Gerrand (@nf) -* Brad Fitzpatrick (@bradfitz) -* Keith Rarick (@kr) - -FAQ: - -Q: Why not move the quantile package into the project root? -A: I want to add more packages to perks later. - -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/bench_test.go b/vendor/github.com/beorn7/perks/quantile/bench_test.go deleted file mode 100644 index 0bd0e4e775..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/bench_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package quantile - -import ( - "testing" -) - -func BenchmarkInsertTargeted(b *testing.B) { - b.ReportAllocs() - - s := NewTargeted(Targets) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiased(b *testing.B) { - s := NewLowBiased(0.01) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { - s := NewLowBiased(0.0001) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkQuery(b *testing.B) { - s := NewTargeted(Targets) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} - -func BenchmarkQuerySmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} diff --git a/vendor/github.com/beorn7/perks/quantile/example_test.go b/vendor/github.com/beorn7/perks/quantile/example_test.go deleted file mode 100644 index ab3293aaf2..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/example_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build go1.1 - -package quantile_test - -import ( - "bufio" - "fmt" - "log" - "os" - "strconv" - "time" - - "github.com/beorn7/perks/quantile" -) - -func Example_simple() { - ch := make(chan float64) - go sendFloats(ch) - - // Compute the 50th, 90th, and 99th percentile. - q := quantile.NewTargeted(map[float64]float64{ - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - }) - for v := range ch { - q.Insert(v) - } - - fmt.Println("perc50:", q.Query(0.50)) - fmt.Println("perc90:", q.Query(0.90)) - fmt.Println("perc99:", q.Query(0.99)) - fmt.Println("count:", q.Count()) - // Output: - // perc50: 5 - // perc90: 16 - // perc99: 223 - // count: 2388 -} - -func Example_mergeMultipleStreams() { - // Scenario: - // We have multiple database shards. On each shard, there is a process - // collecting query response times from the database logs and inserting - // them into a Stream (created via NewTargeted(0.90)), much like the - // Simple example. These processes expose a network interface for us to - // ask them to serialize and send us the results of their - // Stream.Samples so we may Merge and Query them. - // - // NOTES: - // * These sample sets are small, allowing us to get them - // across the network much faster than sending the entire list of data - // points. - // - // * For this to work correctly, we must supply the same quantiles - // a priori the process collecting the samples supplied to NewTargeted, - // even if we do not plan to query them all here. 
- ch := make(chan quantile.Samples) - getDBQuerySamples(ch) - q := quantile.NewTargeted(map[float64]float64{0.90: 0.001}) - for samples := range ch { - q.Merge(samples) - } - fmt.Println("perc90:", q.Query(0.90)) -} - -func Example_window() { - // Scenario: We want the 90th, 95th, and 99th percentiles for each - // minute. - - ch := make(chan float64) - go sendStreamValues(ch) - - tick := time.NewTicker(1 * time.Minute) - q := quantile.NewTargeted(map[float64]float64{ - 0.90: 0.001, - 0.95: 0.0005, - 0.99: 0.0001, - }) - for { - select { - case t := <-tick.C: - flushToDB(t, q.Samples()) - q.Reset() - case v := <-ch: - q.Insert(v) - } - } -} - -func sendStreamValues(ch chan float64) { - // Use your imagination -} - -func flushToDB(t time.Time, samples quantile.Samples) { - // Use your imagination -} - -// This is a stub for the above example. In reality this would hit the remote -// servers via http or something like it. -func getDBQuerySamples(ch chan quantile.Samples) {} - -func sendFloats(ch chan<- float64) { - f, err := os.Open("exampledata.txt") - if err != nil { - log.Fatal(err) - } - sc := bufio.NewScanner(f) - for sc.Scan() { - b := sc.Bytes() - v, err := strconv.ParseFloat(string(b), 64) - if err != nil { - log.Fatal(err) - } - ch <- v - } - if sc.Err() != nil { - log.Fatal(sc.Err()) - } - close(ch) -} diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index f4cabd6695..d7d14f8eb6 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream { // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) } if f < m { m = f @@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream { return newStream(ƒ) } +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. Take care when using across multiple goroutines. 
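// A minimal sketch of such care (illustrative, not part of this package):
// guard a shared Stream with a sync.Mutex and hold the lock around Insert,
// Query and Samples alike, e.g.
//
//	mu.Lock()
//	s.Insert(v)
//	mu.Unlock()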
type Stream struct { diff --git a/vendor/github.com/beorn7/perks/quantile/stream_test.go b/vendor/github.com/beorn7/perks/quantile/stream_test.go deleted file mode 100644 index 855195097e..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package quantile - -import ( - "math" - "math/rand" - "sort" - "testing" -) - -var ( - Targets = map[float64]float64{ - 0.01: 0.001, - 0.10: 0.01, - 0.50: 0.05, - 0.90: 0.01, - 0.99: 0.001, - } - TargetsSmallEpsilon = map[float64]float64{ - 0.01: 0.0001, - 0.10: 0.001, - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - } - LowQuantiles = []float64{0.01, 0.1, 0.5} - HighQuantiles = []float64{0.99, 0.9, 0.5} -) - -const RelativeEpsilon = 0.01 - -func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for quantile, epsilon := range Targets { - n := float64(len(a)) - k := int(quantile * n) - if k < 1 { - k = 1 - } - lower := int((quantile - epsilon) * n) - if lower < 1 { - lower = 1 - } - upper := int(math.Ceil((quantile + epsilon) * n)) - if upper > len(a) { - upper = len(a) - } - w, min, max := a[k-1], a[lower-1], a[upper-1] - if g := s.Query(quantile); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) - } - } -} - -func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range LowQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - RelativeEpsilon) * qu * n) - upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range HighQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) - upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func populateStream(s *Stream) []float64 { - a := make([]float64, 0, 1e5+100) - for i := 0; i < cap(a); i++ { - v := rand.NormFloat64() - // Add 5% asymmetric outliers. 
- if i%20 == 0 { - v = v*v + 1 - } - s.Insert(v) - a = append(a, v) - } - return a -} - -func TestTargetedQuery(t *testing.T) { - rand.Seed(42) - s := NewTargeted(Targets) - a := populateStream(s) - verifyPercsWithAbsoluteEpsilon(t, a, s) -} - -func TestTargetedQuerySmallSampleSize(t *testing.T) { - rand.Seed(42) - s := NewTargeted(TargetsSmallEpsilon) - a := []float64{1, 2, 3, 4, 5} - for _, v := range a { - s.Insert(v) - } - verifyPercsWithAbsoluteEpsilon(t, a, s) - // If not yet flushed, results should be precise: - if !s.flushed() { - for φ, want := range map[float64]float64{ - 0.01: 1, - 0.10: 1, - 0.50: 3, - 0.90: 5, - 0.99: 5, - } { - if got := s.Query(φ); got != want { - t.Errorf("want %f for φ=%f, got %f", want, φ, got) - } - } - } -} - -func TestLowBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewLowBiased(RelativeEpsilon) - a := populateStream(s) - verifyLowPercsWithRelativeEpsilon(t, a, s) -} - -func TestHighBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewHighBiased(RelativeEpsilon) - a := populateStream(s) - verifyHighPercsWithRelativeEpsilon(t, a, s) -} - -// BrokenTestTargetedMerge is broken, see Merge doc comment. -func BrokenTestTargetedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewTargeted(Targets) - s2 := NewTargeted(Targets) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyPercsWithAbsoluteEpsilon(t, a, s1) -} - -// BrokenTestLowBiasedMerge is broken, see Merge doc comment. -func BrokenTestLowBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewLowBiased(RelativeEpsilon) - s2 := NewLowBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyLowPercsWithRelativeEpsilon(t, a, s2) -} - -// BrokenTestHighBiasedMerge is broken, see Merge doc comment. -func BrokenTestHighBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewHighBiased(RelativeEpsilon) - s2 := NewHighBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyHighPercsWithRelativeEpsilon(t, a, s2) -} - -func TestUncompressed(t *testing.T) { - q := NewTargeted(Targets) - for i := 100; i > 0; i-- { - q.Insert(float64(i)) - } - if g := q.Count(); g != 100 { - t.Errorf("want count 100, got %d", g) - } - // Before compression, Query should have 100% accuracy. - for quantile := range Targets { - w := quantile * 100 - if g := q.Query(quantile); g != w { - t.Errorf("want %f, got %f", w, g) - } - } -} - -func TestUncompressedSamples(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.001}) - for i := 1; i <= 100; i++ { - q.Insert(float64(i)) - } - if g := q.Samples().Len(); g != 100 { - t.Errorf("want count 100, got %d", g) - } -} - -func TestUncompressedOne(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.01}) - q.Insert(3.14) - if g := q.Query(0.90); g != 3.14 { - t.Error("want PI, got", g) - } -} - -func TestDefaults(t *testing.T) { - if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { - t.Errorf("want 0, got %f", g) - } -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS new file mode 100644 index 0000000000..e068e731ea --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS @@ -0,0 +1 @@ +Google Inc. 
\ No newline at end of file diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 0000000000..12b578d068 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node that produces the span or tracing data. +// Note, this is not the metadata about the Node or service that is described by associated spans. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. 
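+ // In practice this is the host name, process id and start timestamp
+ // carried by the ProcessIdentifier message defined below.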
+ Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. + LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. + Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. 
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. 
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" +} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) +} + 
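+// fileDescriptor_126c72ed8a252c84 below holds the gzipped FileDescriptorProto
+// for common.proto; it is registered in init() above and is returned (with an
+// index path) by the generated Descriptor() methods.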
+var fileDescriptor_126c72ed8a252c84 = []byte{ + // 590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, + 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, + 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, + 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, + 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, + 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, + 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, + 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, + 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, + 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, + 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, + 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, + 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, + 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, + 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, + 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, + 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, + 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, + 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, + 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, + 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, + 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, + 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, + 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, + 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, + 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, + 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, + 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, + 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, + 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, + 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, + 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, + 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, + 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, + 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, + 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go new file mode 100644 index 0000000000..801212d925 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go @@ -0,0 +1,264 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportMetricsServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Metrics from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of metrics that belong to the last received Node. + Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // The resource for the metrics in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known + // at all or when all sent metrics have an explicit resource set. 
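+ // (For example, a client could populate this field on the first request
+ // of the stream and leave it unset afterwards; the receiver then applies
+ // the most recently seen resource.)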
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{0} +} + +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{1} +} + +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) +} + +var fileDescriptor_47e253a956287d04 = []byte{ + // 340 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40, + 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45, + 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f, + 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24, + 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26, + 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5, + 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09, + 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54, + 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1, + 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4, + 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7, + 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c, + 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58, + 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6, + 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b, + 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb, + 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90, + 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a, + 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab, + 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa, + 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3, + 0x1b, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &metricsServiceExportClient{stream} + return x, nil +} + +type MetricsService_ExportClient interface { + Send(*ExportMetricsServiceRequest) error + Recv() (*ExportMetricsServiceResponse, error) + grpc.ClientStream +} + +type metricsServiceExportClient struct { + grpc.ClientStream +} + +func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { + m := new(ExportMetricsServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(MetricsService_ExportServer) error +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) +} + +type MetricsService_ExportServer interface { + Send(*ExportMetricsServiceResponse) error + Recv() (*ExportMetricsServiceRequest, error) + grpc.ServerStream +} + +type metricsServiceExportServer struct { + grpc.ServerStream +} + +func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { + m := new(ExportMetricsServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Export", + Handler: _MetricsService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..e7c49a387a --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CurrentLibraryConfig struct {
+	// This is required only in the first message on the stream or if the
+	// previously sent CurrentLibraryConfig message has a different Node (e.g.
+	// when the same RPC is used to configure multiple Applications).
+	Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+	// Current configuration.
+	Config               *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *CurrentLibraryConfig) Reset()         { *m = CurrentLibraryConfig{} }
+func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
+func (*CurrentLibraryConfig) ProtoMessage()    {}
+func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7027f99caf7ac6a5, []int{0}
+}
+
+func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
+}
+func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
+}
+func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
+}
+func (m *CurrentLibraryConfig) XXX_Size() int {
+	return xxx_messageInfo_CurrentLibraryConfig.Size(m)
+}
+func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
+
+func (m *CurrentLibraryConfig) GetNode() *v1.Node {
+	if m != nil {
+		return m.Node
+	}
+	return nil
+}
+
+func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+type UpdatedLibraryConfig struct {
+	// This field is ignored when the RPC is used to configure only one Application.
+	// This is required only in the first message on the stream or if the
+	// previously sent UpdatedLibraryConfig message has a different Node.
+	Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+	// Requested updated configuration.
+	Config               *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *UpdatedLibraryConfig) Reset()         { *m = UpdatedLibraryConfig{} }
+func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
+func (*UpdatedLibraryConfig) ProtoMessage()    {}
+func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7027f99caf7ac6a5, []int{1}
+}
+
+func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
+}
+func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
+}
+func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
+}
+func (m *UpdatedLibraryConfig) XXX_Size() int {
+	return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
+}
+func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
+
+func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
+	if m != nil {
+		return m.Node
+	}
+	return nil
+}
+
+func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+type ExportTraceServiceRequest struct {
+	// This is required only in the first message on the stream or if the
+	// previously sent ExportTraceServiceRequest message has a different Node (e.g.
+	// when the same RPC is used to send Spans from multiple Applications).
+	Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+	// A list of Spans that belong to the last received Node.
+	Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+	// The resource for the spans in this message that do not have an explicit
+	// resource set.
+	// If unset, the most recently set resource in the RPC stream applies. It is
+	// valid to never be set within a stream, e.g. when no resource info is known.
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{2} +} + +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{3} +} + +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") + proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) +} + +var fileDescriptor_7027f99caf7ac6a5 = []byte{ + // 423 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40, + 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a, + 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8, + 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3, + 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d, + 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49, + 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91, + 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d, + 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7, + 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf, + 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73, + 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1, + 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31, + 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95, + 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d, + 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90, + 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d, + 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d, + 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89, + 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44, + 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e, + 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae, + 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59, + 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94, + 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00, + 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, + 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. 
+ Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) + if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(TraceService_ExportServer) error +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..bd4b8a8278 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, berr + } + dec := marshaler.NewDecoder(newReader()) + handleSend := func() error { + var protoReq ExportTraceServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. +func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseStream +) diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go new file mode 100644 index 0000000000..53b8aa99e1 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -0,0 +1,1126 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The kind of metric. It describes how the data is reported. +// +// A gauge is an instantaneous measurement of a value. +// +// A cumulative measurement is a value accumulated over a time interval. In +// a time series, cumulative measurements should have the same start time, +// increasing values and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. +type MetricDescriptor_Type int32 + +const ( + // Do not use this default value. + MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 + // Integer gauge. The value can go both up and down. + MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 + // Floating point gauge. The value can go both up and down. + MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 + // Distribution gauge measurement. 
The count and sum can go both up and
+	// down. Recorded values are always >= 0.
+	// Used in scenarios like a snapshot of the time that the current items in
+	// a queue have spent there.
+	MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
+	// Integer cumulative measurement. The value cannot decrease; if it is
+	// reset, the start_time should also be reset.
+	MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
+	// Floating point cumulative measurement. The value cannot decrease; if it
+	// is reset, the start_time should also be reset. Recorded values are
+	// always >= 0.
+	MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
+	// Distribution cumulative measurement. The count and sum cannot decrease;
+	// if they are reset, the start_time should also be reset.
+	MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
+	// Some frameworks implement Histograms as a summary of observations
+	// (usually things like request durations and response sizes). While such a
+	// summary also provides a total count of observations and a sum of all
+	// observed values, it calculates configurable percentiles over a sliding
+	// time window. This is not recommended, since it cannot be aggregated.
+	MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
+)
+
+var MetricDescriptor_Type_name = map[int32]string{
+	0: "UNSPECIFIED",
+	1: "GAUGE_INT64",
+	2: "GAUGE_DOUBLE",
+	3: "GAUGE_DISTRIBUTION",
+	4: "CUMULATIVE_INT64",
+	5: "CUMULATIVE_DOUBLE",
+	6: "CUMULATIVE_DISTRIBUTION",
+	7: "SUMMARY",
+}
+
+var MetricDescriptor_Type_value = map[string]int32{
+	"UNSPECIFIED":             0,
+	"GAUGE_INT64":             1,
+	"GAUGE_DOUBLE":            2,
+	"GAUGE_DISTRIBUTION":      3,
+	"CUMULATIVE_INT64":        4,
+	"CUMULATIVE_DOUBLE":       5,
+	"CUMULATIVE_DISTRIBUTION": 6,
+	"SUMMARY":                 7,
+}
+
+func (x MetricDescriptor_Type) String() string {
+	return proto.EnumName(MetricDescriptor_Type_name, int32(x))
+}
+
+func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_0ee3deb72053811a, []int{1, 0}
+}
+
+// Defines a Metric which has one or more timeseries.
+type Metric struct {
+	// The descriptor of the Metric.
+	// TODO(issue #152): consider only sending the name of descriptor for
+	// optimization.
+	MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+	// One or more timeseries for a single metric, where each timeseries has
+	// one or more points.
+	Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+	// The resource for the metric. If unset, it may be set to a default value
+	// provided for a sequence of messages in an RPC stream.
+ Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetMetricDescriptor() *MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +func (m *Metric) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *Metric) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +// Defines a metric type and its schema. +type MetricDescriptor struct { + // The metric type, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` + // The label keys associated with the metric descriptor. 
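+	// Each TimeSeries in this Metric carries label values that are matched
+	// positionally against these keys (see TimeSeries.label_values).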
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetType() MetricDescriptor_Type { + if m != nil { + return m.Type + } + return MetricDescriptor_UNSPECIFIED +} + +func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { + if m != nil { + return m.LabelKeys + } + return nil +} + +// Defines a label key associated with a metric descriptor. +type LabelKey struct { + // The key for the label. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A human-readable description of what this label key represents. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelKey) Reset() { *m = LabelKey{} } +func (m *LabelKey) String() string { return proto.CompactTextString(m) } +func (*LabelKey) ProtoMessage() {} +func (*LabelKey) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{2} +} + +func (m *LabelKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelKey.Unmarshal(m, b) +} +func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) +} +func (m *LabelKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelKey.Merge(m, src) +} +func (m *LabelKey) XXX_Size() int { + return xxx_messageInfo_LabelKey.Size(m) +} +func (m *LabelKey) XXX_DiscardUnknown() { + xxx_messageInfo_LabelKey.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelKey proto.InternalMessageInfo + +func (m *LabelKey) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A collection of data points that describes the time-varying values +// of a metric. +type TimeSeries struct { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. 
The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{3} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +func (m *TimeSeries) GetLabelValues() []*LabelValue { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +type LabelValue struct { + // The value for the label. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. 
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{4} +} + +func (m *LabelValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelValue.Unmarshal(m, b) +} +func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) +} +func (m *LabelValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValue.Merge(m, src) +} +func (m *LabelValue) XXX_Size() int { + return xxx_messageInfo_LabelValue.Size(m) +} +func (m *LabelValue) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValue.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValue proto.InternalMessageInfo + +func (m *LabelValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LabelValue) GetHasValue() bool { + if m != nil { + return m.HasValue + } + return false +} + +// A timestamped measurement. +type Point struct { + // The moment when this point was recorded. Inclusive. + // If not specified, the timestamp will be decided by the backend. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The actual point value. + // + // Types that are valid to be assigned to Value: + // *Point_Int64Value + // *Point_DoubleValue + // *Point_DistributionValue + // *Point_SummaryValue + Value isPoint_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{5} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type isPoint_Value interface { + isPoint_Value() +} + +type Point_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Point_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Point_DistributionValue struct { + DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +type Point_SummaryValue struct { + SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` +} + +func (*Point_Int64Value) isPoint_Value() {} + +func (*Point_DoubleValue) 
isPoint_Value() {} + +func (*Point_DistributionValue) isPoint_Value() {} + +func (*Point_SummaryValue) isPoint_Value() {} + +func (m *Point) GetValue() isPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Point) GetInt64Value() int64 { + if x, ok := m.GetValue().(*Point_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *Point) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*Point_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Point) GetDistributionValue() *DistributionValue { + if x, ok := m.GetValue().(*Point_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +func (m *Point) GetSummaryValue() *SummaryValue { + if x, ok := m.GetValue().(*Point_SummaryValue); ok { + return x.SummaryValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Point) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Point_Int64Value)(nil), + (*Point_DoubleValue)(nil), + (*Point_DistributionValue)(nil), + (*Point_SummaryValue)(nil), + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type DistributionValue struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. + // TODO(issue #152): consider not required to send bucket options for + // optimization. + BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. 
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue) Reset() { *m = DistributionValue{} } +func (m *DistributionValue) String() string { return proto.CompactTextString(m) } +func (*DistributionValue) ProtoMessage() {} +func (*DistributionValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6} +} + +func (m *DistributionValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue.Unmarshal(m, b) +} +func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) +} +func (m *DistributionValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue.Merge(m, src) +} +func (m *DistributionValue) XXX_Size() int { + return xxx_messageInfo_DistributionValue.Size(m) +} +func (m *DistributionValue) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue proto.InternalMessageInfo + +func (m *DistributionValue) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described by +// BucketOptions. +// +// If bucket_options has no type, then there is no histogram associated with +// the Distribution. 
+type DistributionValue_BucketOptions struct {
+	// Types that are valid to be assigned to Type:
+	//	*DistributionValue_BucketOptions_Explicit_
+	Type                 isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                               `json:"-"`
+	XXX_unrecognized     []byte                                 `json:"-"`
+	XXX_sizecache        int32                                  `json:"-"`
+}
+
+func (m *DistributionValue_BucketOptions) Reset()         { *m = DistributionValue_BucketOptions{} }
+func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_BucketOptions) ProtoMessage()    {}
+func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0ee3deb72053811a, []int{6, 0}
+}
+
+func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b)
+}
+func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src)
+}
+func (m *DistributionValue_BucketOptions) XXX_Size() int {
+	return xxx_messageInfo_DistributionValue_BucketOptions.Size(m)
+}
+func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo
+
+type isDistributionValue_BucketOptions_Type interface {
+	isDistributionValue_BucketOptions_Type()
+}
+
+type DistributionValue_BucketOptions_Explicit_ struct {
+	Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
+}
+
+func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
+
+func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
+	if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
+		return x.Explicit
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*DistributionValue_BucketOptions_Explicit_)(nil),
+	}
+}
+
+// Specifies a set of buckets with arbitrary upper-bounds.
+// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
+// index i are:
+//
+//	[0, bucket_bounds[i]) for i == 0
+//	[bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
+//	[bucket_bounds[i-1], +infinity) for i == N-1
+type DistributionValue_BucketOptions_Explicit struct {
+	// The values must be strictly increasing and > 0.
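+	// For example, Bounds of [1, 5, 10] define the four buckets
+	// [0, 1), [1, 5), [5, 10) and [10, +infinity).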
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions_Explicit) Reset() { + *m = DistributionValue_BucketOptions_Explicit{} +} +func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} +func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} +} + +func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +type DistributionValue_Bucket struct { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // If the distribution does not have a histogram, then omit this field. 
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } +func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Bucket) ProtoMessage() {} +func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 1} +} + +func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) +} +func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) +} +func (m *DistributionValue_Bucket) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Bucket.Size(m) +} +func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo + +func (m *DistributionValue_Bucket) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// Distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket. +type DistributionValue_Exemplar struct { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. 
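+	// For example, an attachment keyed "trace_id" (illustrative name only)
+	// could link the exemplar back to the operation that recorded it.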
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } +func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Exemplar) ProtoMessage() {} +func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 2} +} + +func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) +} +func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) +} +func (m *DistributionValue_Exemplar) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Exemplar.Size(m) +} +func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo + +func (m *DistributionValue_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { + if m != nil { + return m.Attachments + } + return nil +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +type SummaryValue struct { + // The total number of recorded values since start_time. Optional since + // some systems don't expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // Values calculated over an arbitrary time window. 
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue) Reset() { *m = SummaryValue{} } +func (m *SummaryValue) String() string { return proto.CompactTextString(m) } +func (*SummaryValue) ProtoMessage() {} +func (*SummaryValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7} +} + +func (m *SummaryValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue.Unmarshal(m, b) +} +func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) +} +func (m *SummaryValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue.Merge(m, src) +} +func (m *SummaryValue) XXX_Size() int { + return xxx_messageInfo_SummaryValue.Size(m) +} +func (m *SummaryValue) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue proto.InternalMessageInfo + +func (m *SummaryValue) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The values in this message can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type SummaryValue_Snapshot struct { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. 
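+	// For example, an entry with Percentile 99.0 and Value 0.42 reports the
+	// 99th-percentile observation in the current snapshot.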
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } +func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot) ProtoMessage() {} +func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0} +} + +func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) +} +func (m *SummaryValue_Snapshot) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot.Size(m) +} +func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { + if m != nil { + return m.PercentileValues + } + return nil +} + +// Represents the value at a given percentile of a distribution. +type SummaryValue_Snapshot_ValueAtPercentile struct { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` + // The value at the given percentile of a distribution. 
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { + *m = SummaryValue_Snapshot_ValueAtPercentile{} +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} +func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { + if m != nil { + return m.Percentile + } + return 0 +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) + proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") + proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") + proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") + proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") + proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") + proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") + proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") + proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") + proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") + proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") + proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") + proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") + proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") + proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") +} + +func init() { + 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) +} + +var fileDescriptor_0ee3deb72053811a = []byte{ + // 1098 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2, + 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87, + 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8, + 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4, + 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f, + 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97, + 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98, + 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06, + 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01, + 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64, + 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21, + 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1, + 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43, + 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77, + 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e, + 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a, + 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5, + 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42, + 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6, + 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb, + 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35, + 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65, + 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c, + 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80, + 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4, + 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac, + 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e, + 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32, + 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b, + 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1, + 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34, + 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39, + 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6, + 0x17, 0x00, 0x19, 
0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e, + 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6, + 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9, + 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34, + 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13, + 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d, + 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5, + 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39, + 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79, + 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a, + 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99, + 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93, + 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf, + 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b, + 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1, + 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06, + 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f, + 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75, + 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52, + 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca, + 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6, + 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58, + 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72, + 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09, + 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff, + 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33, + 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50, + 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d, + 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff, + 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec, + 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd, + 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc, + 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef, + 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go 
b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 0000000000..38faa9fdf1 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 
0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, + 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, + 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, + 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, + 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go new file mode 100644 index 0000000000..4de05355a4 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -0,0 +1,1543 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SERVER Span_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + Span_CLIENT Span_SpanKind = 2 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SERVER", + 2: "CLIENT", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SERVER": 1, + "CLIENT": 2, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. 
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown, or known but other + // than parent-child. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace. And form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next id is 17. +// TODO(bdrutu): Add an example. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The Tracestate on the span. + Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. 
+ // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to a non-empty string. + // When a null or empty string is received, the receiver may use the string "name" + // as a replacement. There might be smarter algorithms implemented by + // the receiver to fix the empty span name. + // + // This field is required. + Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // + // This field is semantically required. When not set on receive, the + // receiver should set it to the value of the end_time field if it was + // set, or to the current time if neither was set. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // + // This field is semantically required. When not set on receive, the + // receiver should set it to the start_time value. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // A stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // The included time events. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // The included links. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. Semantically, when Status + // isn't set it means the span ended without errors, and + // Status.Ok (code = 0) can be assumed. + Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // An optional resource that is associated with this span. If not set, this span + // should be part of a batch that does include the resource information, unless resource + // information is unknown. + Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` + // A highly recommended but not required flag that identifies when a + // trace crosses a process boundary.
True when the parent_span belongs + // to the same process as the current span. This flag is most commonly + // used to indicate the need to adjust time as clocks in different + // processes may not be synchronized. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. + ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() *TruncatableString { + if m != nil { + return m.Name + } + return nil +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// This field conveys information about request position in multiple distributed tracing graphs. +// It is a list of Tracestate.Entry with a maximum of 32 members in the list. 
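+//
+// An illustrative sketch (editorial aside, not emitted by protoc-gen-go):
+// a tracestate carrying one entry; the key/value pair is a made-up example
+// that satisfies the Entry constraints documented below.
+//
+//	ts := &Span_Tracestate{
+//		Entries: []*Span_Tracestate_Entry{
+//			{Key: "vendorname1", Value: "opaqueValue1"},
+//		},
+//	}
+//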
+// +// See the https://github.com/w3c/distributed-tracing for more details about this field. +type Span_Tracestate struct { + // A list of entries that represent the Tracestate. + Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } +func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate) ProtoMessage() {} +func (*Span_Tracestate) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) +} +func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate.Merge(m, src) +} +func (m *Span_Tracestate) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate.Size(m) +} +func (m *Span_Tracestate) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo + +func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type Span_Tracestate_Entry struct { + // The key must begin with a lowercase letter, and can only contain + // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes + // '-', asterisks '*', and forward slashes '/'. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value is opaque string up to 256 characters printable ASCII + // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. + // Note that this also excludes tabs, newlines, carriage returns, etc. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } +func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate_Entry) ProtoMessage() {} +func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} +} + +func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) +} +func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) +} +func (m *Span_Tracestate_Entry) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate_Entry.Size(m) +} +func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo + +func (m *Span_Tracestate_Entry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Span_Tracestate_Entry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A set of attributes, each with a key and a value. 
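+//
+// An illustrative sketch (editorial aside, not emitted by protoc-gen-go):
+// the "/http/server_latency" key mirrors the example in the attribute_map
+// comment below; AttributeValue is the oneof wrapper declared later in
+// this file.
+//
+//	attrs := &Span_Attributes{
+//		AttributeMap: map[string]*AttributeValue{
+//			"/http/server_latency": {Value: &AttributeValue_IntValue{IntValue: 300}},
+//		},
+//	}
+//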
+type Span_Attributes struct { + // The set of attributes. The value can be a string, an integer, a double + // or the Boolean values `true` or `false`. Note, global attributes like + // server name can be set as tags using resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0, then no attributes were dropped. + DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 1} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +// A text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // The type of MessageEvent. Indicates whether the message was sent or + // received. + Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. For example, this field could + // represent a sequence ID for a streaming RPC. It is recommended to be + // unique within a Span. + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. 
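+ //
+ // An illustrative sketch (editorial aside, not emitted by protoc-gen-go):
+ // a SENT message event wrapped in the TimeEvent oneof; the id, sizes and
+ // timestamp literal are made-up values.
+ //
+ //	ev := &Span_TimeEvent{
+ //		Time: &timestamp.Timestamp{Seconds: 1500000000},
+ //		Value: &Span_TimeEvent_MessageEvent_{
+ //			MessageEvent: &Span_TimeEvent_MessageEvent{
+ //				Type:             Span_TimeEvent_MessageEvent_SENT,
+ //				Id:               1,
+ //				UncompressedSize: 2048,
+ //				CompressedSize:   512,
+ //			},
+ //		},
+ //	}
+ //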
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { + if m != nil { + return m.UncompressedSize + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { + if m != nil { + return m.CompressedSize + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key-value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 3} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. 
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 5} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. 
This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +type Status struct { + // The status code. This is optional field. It is safe to assume 0 (OK) + // when not set. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{1} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// The value of an Attribute. +type AttributeValue struct { + // The type of the value. + // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + // *AttributeValue_DoubleValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{2} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + 
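+//
+// An illustrative sketch (editorial aside, not emitted by protoc-gen-go):
+// reading the oneof through the generated typed getters defined below;
+// getters for variants that are not set return zero values.
+//
+//	av := &AttributeValue{Value: &AttributeValue_BoolValue{BoolValue: true}}
+//	_ = av.GetBoolValue() // true
+//	_ = av.GetIntValue()  // 0, because the int_value variant is not set
+//
+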
+type AttributeValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AttributeValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + (*AttributeValue_DoubleValue)(nil), + } +} + +// The call stack which originated this span. +type StackTrace struct { + // Stack frames in this stack trace. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. + // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. 
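+ //
+ // An illustrative sketch (editorial aside, not emitted by protoc-gen-go):
+ // the first occurrence carries both the frames and the hash, later spans
+ // carry only the hash; `frames` and the value 42 are made-up placeholders.
+ //
+ //	first := &StackTrace{StackFrames: frames, StackTraceHashId: 42}
+ //	later := &StackTrace{StackTraceHashId: 42}
+ //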
+ StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// A single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears. + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code. 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// A description of a binary module. +type Module struct { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents. + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{4} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// A string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. 
Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{5} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") + proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") + proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") + proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") + proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") + proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") + proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") + 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") + proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) +} + +var fileDescriptor_8ea38bbb821bf584 = []byte{ + // 1557 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47, + 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17, + 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6, + 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81, + 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4, + 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d, + 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7, + 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91, + 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91, + 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6, + 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d, + 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15, + 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75, + 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97, + 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f, + 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb, + 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12, + 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e, + 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69, + 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9, + 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad, + 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e, + 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35, + 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69, + 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46, + 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49, + 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5, + 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04, + 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59, + 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 
0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27, + 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10, + 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b, + 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3, + 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90, + 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84, + 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5, + 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44, + 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7, + 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7, + 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b, + 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34, + 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45, + 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91, + 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08, + 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce, + 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5, + 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73, + 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee, + 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29, + 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5, + 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73, + 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4, + 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf, + 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16, + 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8, + 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab, + 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea, + 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8, + 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79, + 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd, + 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14, + 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb, + 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8, + 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8, + 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b, + 0x1a, 0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 
0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc, + 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88, + 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c, + 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60, + 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56, + 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf, + 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad, + 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c, + 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f, + 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65, + 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc, + 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1, + 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba, + 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e, + 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55, + 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27, + 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54, + 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f, + 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45, + 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf, + 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab, + 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe, + 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e, + 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc, + 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd, + 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74, + 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26, + 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae, + 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3, + 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73, + 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3, + 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94, + 0x45, 0x03, 0x11, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go new file mode 100644 index 0000000000..2ac2d28c47 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ProbabilitySampler + // *TraceConfig_ConstantSampler + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` + // The global default max number of message events per span. + MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` + // The global default max number of link entries per span. 
+ MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{0} +} + +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ProbabilitySampler struct { + ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { + if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { + return x.ProbabilitySampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { + if m != nil { + return m.MaxNumberOfAnnotations + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { + if m != nil { + return m.MaxNumberOfMessageEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ProbabilitySampler)(nil), + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that tries to uniformly sample traces with a given probability. 
+// The probability of sampling a trace is equal to that of the specified probability. +type ProbabilitySampler struct { + // The desired probability of sampling. Must be within [0.0, 1.0]. + SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } +func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } +func (*ProbabilitySampler) ProtoMessage() {} +func (*ProbabilitySampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{1} +} + +func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) +} +func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) +} +func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbabilitySampler.Merge(m, src) +} +func (m *ProbabilitySampler) XXX_Size() int { + return xxx_messageInfo_ProbabilitySampler.Size(m) +} +func (m *ProbabilitySampler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo + +func (m *ProbabilitySampler) GetSamplingProbability() float64 { + if m != nil { + return m.SamplingProbability + } + return 0 +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2} +} + +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{3} +} + +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") + proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") + proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") + proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) +} + +var fileDescriptor_5359209b41ff50c5 = []byte{ + // 486 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40, + 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa, + 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4, + 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4, + 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33, + 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98, + 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52, + 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9, + 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc, + 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d, + 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d, + 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90, + 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e, + 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85, + 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71, + 
0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00, + 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71, + 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37, + 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8, + 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e, + 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04, + 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4, + 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39, + 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe, + 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d, + 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49, + 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd, + 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49, + 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30, + 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2, + 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml deleted file mode 100644 index 984e0736e7..0000000000 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: - - 1.5.4 - - 1.6.3 - - 1.7 -install: - - go get -v golang.org/x/tools/cmd/cover -script: - - go test -v -tags=safe ./spew - - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov -after_success: - - go get -v github.com/mattn/goveralls - - export PATH=$PATH:$HOME/gopath/bin - - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c836416192..bc52e96f2b 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md deleted file mode 100644 index 262430449b..0000000000 --- a/vendor/github.com/davecgh/go-spew/README.md +++ /dev/null @@ -1,205 +0,0 @@ -go-spew -======= - -[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] -(https://travis-ci.org/davecgh/go-spew) [![ISC License] -(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] -(https://img.shields.io/coveralls/davecgh/go-spew.svg)] -(https://coveralls.io/r/davecgh/go-spew?branch=master) - - -Go-spew implements a deep pretty printer for Go data structures to aid in -debugging. 
A comprehensive suite of tests with 100% test coverage is provided -to ensure proper functionality. See `test_coverage.txt` for the gocov coverage -report. Go-spew is licensed under the liberal ISC license, so it may be used in -open source or commercial projects. - -If you're interested in reading about how this package came to life and some -of the challenges involved in providing a deep pretty printer, there is a blog -post about it -[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). - -## Documentation - -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/davecgh/go-spew/spew) - -Full `go doc` style documentation for the project can be viewed online without -installing this package by using the excellent GoDoc site here: -http://godoc.org/github.com/davecgh/go-spew/spew - -You can also view the documentation locally once the package is installed with -the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/davecgh/go-spew/spew - -## Installation - -```bash -$ go get -u github.com/davecgh/go-spew/spew -``` - -## Quick Start - -Add this import line to the file you're working in: - -```Go -import "github.com/davecgh/go-spew/spew" -``` - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - -```Go -spew.Dump(myVar1, myVar2, ...) -spew.Fdump(someWriter, myVar1, myVar2, ...) -str := spew.Sdump(myVar1, myVar2, ...) -``` - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most -compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types -and pointer addresses): - -```Go -spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -``` - -## Debugging a Web Application Example - -Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. - -```Go -package main - -import ( - "fmt" - "html" - "net/http" - - "github.com/davecgh/go-spew/spew" -) - -func handler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/html") - fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) - fmt.Fprintf(w, "") -} - -func main() { - http.HandleFunc("/", handler) - http.ListenAndServe(":8080", nil) -} -``` - -## Sample Dump Output - -``` -(main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) { - (string) "one": (bool) true - } -} -([]uint8) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| -} -``` - -## Sample Formatter Output - -Double pointer to a uint8: -``` - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 -``` - -Pointer to circular struct with a uint8 field and a pointer to itself: -``` - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} -``` - -## Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available via the -spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -``` -* Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - -* MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - -* DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - -* DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. This option - relies on access to the unsafe package, so it will not have any effect when - running in environments without access to the unsafe package such as Google - App Engine or with the "safe" build tag specified. - Pointer method invocation is enabled by default. - -* DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - -* DisableCapacities - DisableCapacities specifies whether to disable the printing of capacities - for arrays, slices, maps and channels. This is useful when diffing data - structures in tests. - -* ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - -* SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are supported, - with other types sorted according to the reflect.Value.String() output - which guarantees display stability. Natural map order is used by - default. - -* SpewKeys - SpewKeys specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only considered - if SortKeys is true. - -``` - -## Unsafe Package Dependency - -This package relies on the unsafe package to perform some of the more advanced -features, however it also supports a "limited" mode which allows it to work in -environments where the unsafe package is not available. By default, it will -operate in this mode on Google App Engine and when compiled with GopherJS. The -"safe" build tag may also be specified to force the package to build without -using the unsafe package. - -## License - -Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. 
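The `ConfigState` options listed in the README above are plain struct fields, and every top-level spew function has an equivalent `ConfigState` method. A minimal sketch of combining them (the field values shown are illustrative choices, not the package defaults):

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A private configuration instance: tab indentation, bounded
	// recursion depth, and sorted map keys for deterministic,
	// diffable output.
	cs := spew.ConfigState{
		Indent:   "\t",
		MaxDepth: 3,
		SortKeys: true,
	}
	cs.Dump(map[string]int{"b": 2, "a": 1})
}
```

Because each instance carries its own settings, separate goroutines can use differently configured instances concurrently without touching the global `spew.Config`.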
diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh deleted file mode 100644 index 9579497e41..0000000000 --- a/vendor/github.com/davecgh/go-spew/cov_report.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. - -# Check for gocov. -if ! type gocov >/dev/null 2>&1; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi - -# Only run the cgo tests if gcc is installed. -if type gcc >/dev/null 2>&1; then - (cd spew && gocov test -tags testcgo | gocov report) -else - (cd spew && gocov test | gocov report) -fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a2..792994785e 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). 
Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. 
- switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5d..205c28d68c 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff47a..1be8ce9457 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. 
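The bypass.go rewrite above exists because the reflect package marks unexported struct fields read-only and refuses to materialize them; clearing that flag is what `unsafeReflectValue` does. A self-contained sketch of the restriction itself, using only the standard library (independent of spew):

```Go
package main

import (
	"fmt"
	"reflect"
)

type wrapped struct{ hidden string }

func main() {
	v := reflect.ValueOf(wrapped{hidden: "shh"}).Field(0)
	// CanInterface reports false because reflect sets the read-only
	// flag (the flagRO bit inferred in bypass.go above) on unexported
	// fields; calling v.Interface() here would panic.
	fmt.Println(v.CanInterface()) // false
	fmt.Println(v.String())       // "shh" (String is special-cased and never panics)
}
```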
diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go deleted file mode 100644 index 0f5ce47dca..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common_test.go +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// custom type to test Stinger interface on non-pointer receiver. -type stringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with non-pointer receivers. -func (s stringer) String() string { - return "stringer " + string(s) -} - -// custom type to test Stinger interface on pointer receiver. -type pstringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with only pointer receivers. -func (s *pstringer) String() string { - return "stringer " + string(*s) -} - -// xref1 and xref2 are cross referencing structs for testing circular reference -// detection. -type xref1 struct { - ps2 *xref2 -} -type xref2 struct { - ps1 *xref1 -} - -// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular -// reference for testing detection. -type indirCir1 struct { - ps2 *indirCir2 -} -type indirCir2 struct { - ps3 *indirCir3 -} -type indirCir3 struct { - ps1 *indirCir1 -} - -// embed is used to test embedded structures. -type embed struct { - a string -} - -// embedwrap is used to test embedded structures. -type embedwrap struct { - *embed - e *embed -} - -// panicer is used to intentionally cause a panic for testing spew properly -// handles them -type panicer int - -func (p panicer) String() string { - panic("test panic") -} - -// customError is used to test custom error interface invocation. -type customError int - -func (e customError) Error() string { - return fmt.Sprintf("error: %d", int(e)) -} - -// stringizeWants converts a slice of wanted test output into a format suitable -// for a test error message. -func stringizeWants(wants []string) string { - s := "" - for i, want := range wants { - if i > 0 { - s += fmt.Sprintf("want%d: %s", i+1, want) - } else { - s += "want: " + want - } - } - return s -} - -// testFailed returns whether or not a test failed by checking if the result -// of the test is in the slice of wanted strings. 
-func testFailed(result string, wants []string) bool { - for _, want := range wants { - if result == want { - return false - } - } - return true -} - -type sortableStruct struct { - x int -} - -func (ss sortableStruct) String() string { - return fmt.Sprintf("ss.%d", ss.x) -} - -type unsortableStruct struct { - x int -} - -type sortTestCase struct { - input []reflect.Value - expected []reflect.Value -} - -func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { - getInterfaces := func(values []reflect.Value) []interface{} { - interfaces := []interface{}{} - for _, v := range values { - interfaces = append(interfaces, v.Interface()) - } - return interfaces - } - - for _, test := range tests { - spew.SortValues(test.input, cs) - // reflect.DeepEqual cannot really make sense of reflect.Value, - // probably because of all the pointer tricks. For instance, - // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} - // instead. - input := getInterfaces(test.input) - expected := getInterfaces(test.expected) - if !reflect.DeepEqual(input, expected) { - t.Errorf("Sort mismatch:\n %v != %v", input, expected) - } - } -} - -// TestSortValues ensures the sort functionality for relect.Value based sorting -// works as intended. -func TestSortValues(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - embedA := v(embed{"a"}) - embedB := v(embed{"b"}) - embedC := v(embed{"c"}) - tests := []sortTestCase{ - // No values. - { - []reflect.Value{}, - []reflect.Value{}, - }, - // Bools. - { - []reflect.Value{v(false), v(true), v(false)}, - []reflect.Value{v(false), v(false), v(true)}, - }, - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Uints. - { - []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, - []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, - }, - // Floats. - { - []reflect.Value{v(2.0), v(1.0), v(3.0)}, - []reflect.Value{v(1.0), v(2.0), v(3.0)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // Array - { - []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, - []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, - }, - // Uintptrs. - { - []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, - []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, - }, - // SortableStructs. - { - // Note: not sorted - DisableMethods is set. - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - // Invalid. - { - []reflect.Value{embedB, embedA, embedC}, - []reflect.Value{embedB, embedA, embedC}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithMethods ensures the sort functionality for relect.Value -// based sorting works as intended when using string methods. -func TestSortValuesWithMethods(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. 
- { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithSpew ensures the sort functionality for reflect.Value -// based sorting works as intended when using spew to stringify keys. -func TestSortValuesWithSpew(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} - helpTestSortValues(tests, &cs, t) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a72..f78d89fc1f 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value.
d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go deleted file mode 100644 index 5aad9c7af0..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. - -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Array containing bytes -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Slice containing bytes -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// dumpTest is used to describe a test to be performed against the Dump method. -type dumpTest struct { - in interface{} - wants []string -} - -// dumpTests houses all of the tests to be performed against the Dump method. 
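The two dump.go hunks above are pure cleanups: raw string literals let the regex patterns keep single backslashes instead of doubled ones, and a bool is already a valid switch case expression, so the `== true` comparisons were redundant. A standalone check that the two literal styles compile to the same pattern (a hypothetical snippet mirroring one of the patterns):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Interpreted string: the backslash must be doubled to survive
	// string unescaping and reach the regexp engine as `\.`.
	escaped := regexp.MustCompile("^.*\\._Ctype_char$")

	// Raw string: backslashes pass through untouched.
	raw := regexp.MustCompile(`^.*\._Ctype_char$`)

	fmt.Println(escaped.String() == raw.String())    // true
	fmt.Println(raw.MatchString("main._Ctype_char")) // true
}
```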
-var dumpTests = make([]dumpTest, 0) - -// addDumpTest is a helper method to append the passed input and desired result -// to dumpTests -func addDumpTest(in interface{}, wants ...string) { - test := dumpTest{in, wants} - dumpTests = append(dumpTests, test) -} - -func addIntDumpTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max int16. - v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max int64. - v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addUintDumpTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max uint32. - v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max uint64. 
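addDumpTest above records a slice of acceptable outputs per case, and TestDump later passes if the actual output matches any one of them; this is how the file copes with constructs such as maps, whose iteration order is deliberately randomized. A self-contained sketch of the same pattern, with hypothetical render and oneOf helpers:

```go
package demo

import (
	"fmt"
	"strings"
	"testing"
)

// render joins map entries in iteration order, which Go randomizes,
// so more than one output is legitimate.
func render(m map[string]int) string {
	parts := make([]string, 0, len(m))
	for k, v := range m {
		parts = append(parts, fmt.Sprintf("%s=%d", k, v))
	}
	return strings.Join(parts, ",")
}

// oneOf reports whether got matches any acceptable output, mirroring the
// inverse of the deleted testFailed helper.
func oneOf(got string, wants []string) bool {
	for _, w := range wants {
		if got == w {
			return true
		}
	}
	return false
}

func TestRender(t *testing.T) {
	got := render(map[string]int{"a": 1, "b": 2})
	wants := []string{"a=1,b=2", "b=2,a=1"}
	if !oneOf(got, wants) {
		t.Errorf("got %q, want one of %q", got, wants)
	}
}
```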
- v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addBoolDumpTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFloatDumpTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard float64. - v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addComplexDumpTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addArrayDumpTests() { - // Array containing standard ints. 
- v := [3]int{1, 2, 3} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + - vt + ") 2,\n (" + vt + ") 3\n}" - addDumpTest(v, "([3]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[3]"+vt+")()\n") - - // Array containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := [3]pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + - ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + - ") (len=" + v2i2Len + ") " + "stringer 3\n}" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + - v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + - ") " + "\"3\"\n}" - } - addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") - addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") - addDumpTest(nv2, "(*[3]"+v2t+")()\n") - - // Array containing interfaces. - v3i0 := "one" - v3 := [3]interface{}{v3i0, int(2), uint(3)} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Array containing bytes. - v4 := [34]byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[34]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[34]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addSliceDumpTests() { - // Slice containing standard float32 values. 
- v := []float32{3.14, 6.28, 12.56} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + - vt + ") 6.28,\n (" + vt + ") 12.56\n}" - addDumpTest(v, "([]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[]"+vt+")()\n") - - // Slice containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := []pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + - v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + - ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + - "stringer 3\n}" - addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*[]"+v2t+")()\n") - - // Slice containing interfaces. - v3i0 := "one" - v3 := []interface{}{v3i0, int(2), uint(3), nil} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3,\n (" + v3t5 + ") \n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Slice containing bytes. - v4 := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Nil slice. 
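The long |!"#$%&'()*+,-./0| expectations above come from spew rendering byte arrays and slices as a hex/ASCII dump (in the style of `hexdump -C`) rather than element by element. A one-line way to see it; the exact offsets and bytes depend on the input, so nothing is asserted here:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// []byte values are hexdumped: offset column, hex bytes, ASCII gutter.
	spew.Dump([]byte("test data 123"))
}
```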
- v5 := []int(nil) - nv5 := (*[]int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "[]int" - v5s := "" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addStringDumpTests() { - // Standard string. - v := "test" - vLen := fmt.Sprintf("%d", len(v)) - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "(len=" + vLen + ") \"test\"" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addInterfaceDumpTests() { - // Nil interface. - var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addMapDumpTests() { - // Map with string keys and int vals. - k := "one" - kk := "two" - m := map[string]int{k: 1, kk: 2} - klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up - kkLen := fmt.Sprintf("%d", len(kk)) - mLen := fmt.Sprintf("%d", len(m)) - nilMap := map[string]int(nil) - nm := (*map[string]int)(nil) - pm := &m - mAddr := fmt.Sprintf("%p", pm) - pmAddr := fmt.Sprintf("%p", &pm) - mt := "map[string]int" - mt1 := "string" - mt2 := "int" - ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + - "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + - ") \"two\": (" + mt2 + ") 2\n}" - ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + - "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + - ") \"one\": (" + mt2 + ") 1\n}" - addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") - addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", - "(*"+mt+")("+mAddr+")("+ms2+")\n") - addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", - "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") - addDumpTest(nm, "(*"+mt+")()\n") - addDumpTest(nilMap, "("+mt+") \n") - - // Map with custom formatter type on pointer receiver only keys and vals. 
- k2 := pstringer("one") - v2 := pstringer("1") - m2 := map[pstringer]pstringer{k2: v2} - k2Len := fmt.Sprintf("%d", len(k2)) - v2Len := fmt.Sprintf("%d", len(v2)) - m2Len := fmt.Sprintf("%d", len(m2)) - nilMap2 := map[pstringer]pstringer(nil) - nm2 := (*map[pstringer]pstringer)(nil) - pm2 := &m2 - m2Addr := fmt.Sprintf("%p", pm2) - pm2Addr := fmt.Sprintf("%p", &pm2) - m2t := "map[spew_test.pstringer]spew_test.pstringer" - m2t1 := "spew_test.pstringer" - m2t2 := "spew_test.pstringer" - m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + - "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" - if spew.UnsafeDisabled { - m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + - ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + - ") \"1\"\n}" - } - addDumpTest(m2, "("+m2t+") "+m2s+"\n") - addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") - addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") - addDumpTest(nm2, "(*"+m2t+")()\n") - addDumpTest(nilMap2, "("+m2t+") \n") - - // Map with interface keys and values. - k3 := "one" - k3Len := fmt.Sprintf("%d", len(k3)) - m3 := map[interface{}]interface{}{k3: 1} - m3Len := fmt.Sprintf("%d", len(m3)) - nilMap3 := map[interface{}]interface{}(nil) - nm3 := (*map[interface{}]interface{})(nil) - pm3 := &m3 - m3Addr := fmt.Sprintf("%p", pm3) - pm3Addr := fmt.Sprintf("%p", &pm3) - m3t := "map[interface {}]interface {}" - m3t1 := "string" - m3t2 := "int" - m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + - "\"one\": (" + m3t2 + ") 1\n}" - addDumpTest(m3, "("+m3t+") "+m3s+"\n") - addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") - addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") - addDumpTest(nm3, "(*"+m3t+")()\n") - addDumpTest(nilMap3, "("+m3t+") \n") - - // Map with nil interface value. - k4 := "nil" - k4Len := fmt.Sprintf("%d", len(k4)) - m4 := map[string]interface{}{k4: nil} - m4Len := fmt.Sprintf("%d", len(m4)) - nilMap4 := map[string]interface{}(nil) - nm4 := (*map[string]interface{})(nil) - pm4 := &m4 - m4Addr := fmt.Sprintf("%p", pm4) - pm4Addr := fmt.Sprintf("%p", &pm4) - m4t := "map[string]interface {}" - m4t1 := "string" - m4t2 := "interface {}" - m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + - " \"nil\": (" + m4t2 + ") \n}" - addDumpTest(m4, "("+m4t+") "+m4s+"\n") - addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") - addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") - addDumpTest(nm4, "(*"+m4t+")()\n") - addDumpTest(nilMap4, "("+m4t+") \n") -} - -func addStructDumpTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + - v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + - ") (len=5) stringer test2\n}" - v3sp := v3s - if spew.UnsafeDisabled { - v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) \"test2\"\n}" - v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) stringer test2\n}" - } - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Struct that contains embedded struct and field to same struct. - e := embed{"embedstr"} - eLen := fmt.Sprintf("%d", len("embedstr")) - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + - ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + - ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + - " \"embedstr\"\n })\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addUintptrDumpTests() { - // Null pointer. - v := uintptr(0) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - nv2 := (*uintptr)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addUnsafePointerDumpTests() { - // Null pointer. 
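Several expectations above and below branch on spew.UnsafeDisabled because plain reflection refuses to hand out unexported struct fields: calling Interface() on them panics, so without the unsafe package spew can only fall back to a more limited rendering. A small sketch of the underlying restriction:

```go
package main

import (
	"fmt"
	"reflect"
)

type pair struct {
	Exported   string
	unexported string
}

func main() {
	v := reflect.ValueOf(pair{"yes", "no"})
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		name := v.Type().Field(i).Name
		// CanInterface is false for unexported fields; Interface() would
		// panic on them, which is why spew resorts to unsafe tricks
		// (guarded by UnsafeDisabled) to show their contents.
		fmt.Printf("%s: CanInterface=%v\n", name, f.CanInterface())
	}
}
```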
- v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addChanDumpTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFuncDumpTests() { - // Function with no params and no returns. - v := addIntDumpTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Function with param and no returns. - v2 := TestDump - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") -} - -func addCircularDumpTests() { - // Struct that is circular through self referencing. 
- type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + - vAddr + ")()\n })\n}" - vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") - - // Structs that are circular through cross referencing. - v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + - ")()\n })\n })\n}" - v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")()\n })\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") - - // Structs that are indirectly circular. - v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + - ")()\n })\n })\n })\n}" - v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")()\n })\n })\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") -} - -func addPanicDumpTests() { - // Type that panics in its Stringer interface. - v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addErrorDumpTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -// TestDump executes all of the tests described by dumpTests. -func TestDump(t *testing.T) { - // Setup tests. 
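The circular cases above work because spew remembers every pointer it has already descended through during a dump and prints a circular marker instead of recursing again. A minimal demonstration with a hypothetical node type:

```go
package main

import "github.com/davecgh/go-spew/spew"

// node forms a linked structure that can point back at itself.
type node struct {
	name string
	next *node
}

func main() {
	a := node{name: "a"}
	b := node{name: "b", next: &a}
	a.next = &b // cycle: a -> b -> a

	// Dump terminates: the second visit to an already-seen pointer is
	// rendered as a circular reference rather than followed.
	spew.Dump(a)
}
```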
- addIntDumpTests() - addUintDumpTests() - addBoolDumpTests() - addFloatDumpTests() - addComplexDumpTests() - addArrayDumpTests() - addSliceDumpTests() - addStringDumpTests() - addInterfaceDumpTests() - addMapDumpTests() - addStructDumpTests() - addUintptrDumpTests() - addUnsafePointerDumpTests() - addChanDumpTests() - addFuncDumpTests() - addCircularDumpTests() - addPanicDumpTests() - addErrorDumpTests() - addCgoDumpTests() - - t.Logf("Running %d tests", len(dumpTests)) - for i, test := range dumpTests { - buf := new(bytes.Buffer) - spew.Fdump(buf, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) - continue - } - } -} - -func TestDumpSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + - "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + - "(len=1) \"3\"\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "(map[spew_test.stringer]int) (len=3) {\n" + - "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if spew.UnsafeDisabled { - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + - "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + - "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + - "}\n" - } - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "(map[spew_test.customError]int) (len=3) {\n" + - "(spew_test.customError) error: 1: (int) 1,\n" + - "(spew_test.customError) error: 2: (int) 2,\n" + - "(spew_test.customError) error: 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go deleted file mode 100644 index 6ab180809a..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2013-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when both cgo is supported and "-tags testcgo" is added to the go test -// command line. This means the cgo tests are only added (and hence run) when -// specifically requested. This configuration is used because spew itself -// does not require cgo to run even though it does handle certain cgo types -// specially. Rather than forcing all clients to require cgo and an external -// C compiler just to run the tests, this scheme makes them optional. -// +build cgo,testcgo - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew/testdata" -) - -func addCgoDumpTests() { - // C char pointer. - v := testdata.GetCgoCharPointer() - nv := testdata.GetCgoNullCharPointer() - pv := &v - vcAddr := fmt.Sprintf("%p", v) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "*testdata._Ctype_char" - vs := "116" - addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(nv, "("+vt+")(<nil>)\n") - - // C char array. - v2, v2l, v2c := testdata.GetCgoCharArray() - v2Len := fmt.Sprintf("%d", v2l) - v2Cap := fmt.Sprintf("%d", v2c) - v2t := "[6]testdata._Ctype_char" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + - "{\n 00000000 74 65 73 74 32 00 " + " |test2.|\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - - // C unsigned char array. - v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() - v3Len := fmt.Sprintf("%d", v3l) - v3Cap := fmt.Sprintf("%d", v3c) - v3t := "[6]testdata._Ctype_unsignedchar" - v3t2 := "[6]testdata._Ctype_uchar" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + - "{\n 00000000 74 65 73 74 33 00 " + " |test3.|\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") - - // C signed char array. - v4, v4l, v4c := testdata.GetCgoSignedCharArray() - v4Len := fmt.Sprintf("%d", v4l) - v4Cap := fmt.Sprintf("%d", v4c) - v4t := "[6]testdata._Ctype_schar" - v4t2 := "testdata._Ctype_schar" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + - ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + - ") 0\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - - // C uint8_t array. - v5, v5l, v5c := testdata.GetCgoUint8tArray() - v5Len := fmt.Sprintf("%d", v5l) - v5Cap := fmt.Sprintf("%d", v5c) - v5t := "[6]testdata._Ctype_uint8_t" - v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + - "{\n 00000000 74 65 73 74 35 00 " + " |test5.|\n}" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - - // C typedefed unsigned char array.
- v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() - v6Len := fmt.Sprintf("%d", v6l) - v6Cap := fmt.Sprintf("%d", v6c) - v6t := "[6]testdata._Ctype_custom_uchar_t" - v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + - "{\n 00000000 74 65 73 74 36 00 " + - " |test6.|\n}" - addDumpTest(v6, "("+v6t+") "+v6s+"\n") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go deleted file mode 100644 index 52a0971fb3..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either cgo is not supported or "-tags testcgo" is not added to the go -// test command line. This file intentionally does not setup any cgo tests in -// this scenario. -// +build !cgo !testcgo - -package spew_test - -func addCgoDumpTests() { - // Don't add any tests for cgo since this file is only compiled when - // there should not be any cgo tests. -} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go deleted file mode 100644 index c6ec8c6d59..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/example_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" -) - -type Flag int - -const ( - flagOne Flag = iota - flagTwo -) - -var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", -} - -func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) -} - -type Bar struct { - data uintptr -} - -type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} -} - -// This example demonstrates how to use Dump to dump variables to stdout. 
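The deleted dumpcgo_test.go / dumpnocgo_test.go pair above shows the build-tag scheme the NOTE comments describe: exactly one definition of addCgoDumpTests is compiled into any given test run, the real one under `go test -tags testcgo` with cgo available and a no-op stub otherwise. A sketch of the same scheme in both the legacy `// +build` syntax used here and the current `//go:build` form; file and function names are hypothetical, and the two files would live side by side in one package:

```go
// feature_cgo_test.go: compiled only with cgo AND -tags testcgo.

//go:build cgo && testcgo
// +build cgo,testcgo

package demo

func addPlatformTests() {
	// register cases that need a C toolchain here
}
```

```go
// feature_nocgo_test.go: the stub used for every other build.

//go:build !cgo || !testcgo
// +build !cgo !testcgo

package demo

func addPlatformTests() {}
```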
-func ExampleDump() { - // The following package level declarations are assumed for this example: - /* - type Flag int - - const ( - flagOne Flag = iota - flagTwo - ) - - var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", - } - - func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) - } - - type Bar struct { - data uintptr - } - - type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} - } - */ - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - f := Flag(5) - b := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - - // Dump! - spew.Dump(s1, f, b) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Flag) Unknown flag (5) - // ([]uint8) (len=34 cap=34) { - // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - // 00000020 31 32 |12| - // } - // -} - -// This example demonstrates how to use Printf to display a variable with a -// format string and inline formatting. -func ExamplePrintf() { - // Create a double pointer to a uint 8. - ui8 := uint8(5) - pui8 := &ui8 - ppui8 := &pui8 - - // Create a circular data type. - type circular struct { - ui8 uint8 - c *circular - } - c := circular{ui8: 1} - c.c = &c - - // Print! - spew.Printf("ppui8: %v\n", ppui8) - spew.Printf("circular: %v\n", c) - - // Output: - // ppui8: <**>5 - // circular: {1 <*>{1 <*>}} -} - -// This example demonstrates how to use a ConfigState. -func ExampleConfigState() { - // Modify the indent level of the ConfigState only. The global - // configuration is not modified. - scs := spew.ConfigState{Indent: "\t"} - - // Output using the ConfigState instance. - v := map[string]int{"one": 1} - scs.Printf("v: %v\n", v) - scs.Dump(v) - - // Output: - // v: map[one:1] - // (map[string]int) (len=1) { - // (string) (len=3) "one": (int) 1 - // } -} - -// This example demonstrates how to use ConfigState.Dump to dump variables to -// stdout -func ExampleConfigState_Dump() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances with different indentation. - scs := spew.ConfigState{Indent: "\t"} - scs2 := spew.ConfigState{Indent: " "} - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - - // Dump using the ConfigState instances. 
- scs.Dump(s1) - scs2.Dump(s1) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // -} - -// This example demonstrates how to use ConfigState.Printf to display a variable -// with a format string and inline formatting. -func ExampleConfigState_Printf() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances and modify the method handling of the - // first ConfigState only. - scs := spew.NewDefaultConfig() - scs2 := spew.NewDefaultConfig() - scs.DisableMethods = true - - // Alternatively - // scs := spew.ConfigState{Indent: " ", DisableMethods: true} - // scs2 := spew.ConfigState{Indent: " "} - - // This is of type Flag which implements a Stringer and has raw value 1. - f := flagTwo - - // Dump using the ConfigState instances. - scs.Printf("f: %v\n", f) - scs2.Printf("f: %v\n", f) - - // Output: - // f: 1 - // f: flagTwo -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bacb..b04edb7d7a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go deleted file mode 100644 index f9b93abe86..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ /dev/null @@ -1,1558 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. 
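ExampleDump, ExamplePrintf, and the ConfigState examples above are testable examples: `go test` runs every ExampleXxx function and fails if what it writes to stdout differs from its trailing // Output: comment, which is why the expected dumps are embedded verbatim. The mechanism in miniature (ExampleGreet is a hypothetical stand-in):

```go
package demo

import "fmt"

// ExampleGreet runs under `go test`; the test fails unless stdout
// matches the // Output: comment exactly.
func ExampleGreet() {
	fmt.Println("hello, spew")
	// Output:
	// hello, spew
}
```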
- -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -- Type that has a custom Error interface -- %x passthrough with uint -- %#x passthrough with uint -- %f passthrough with precision -- %f passthrough with width and precision -- %d passthrough with width -- %q passthrough with string -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// formatterTest is used to describe a test to be performed against NewFormatter. -type formatterTest struct { - format string - in interface{} - wants []string -} - -// formatterTests houses all of the tests to be performed against NewFormatter. -var formatterTests = make([]formatterTest, 0) - -// addFormatterTest is a helper method to append the passed input and desired -// result to formatterTests. -func addFormatterTest(format string, in interface{}, wants ...string) { - test := formatterTest{format, in, wants} - formatterTests = append(formatterTests, test) -} - -func addIntFormatterTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max int16. 
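The %v / %+v / %#v / %#+v battery that starts above works because spew.NewFormatter returns a value implementing fmt.Formatter, the hook through which fmt exposes the active verb and its flags. A small standalone Formatter (hexInt is a hypothetical type) showing the same hook:

```go
package main

import "fmt"

// hexInt customizes its own formatting: fmt calls Format with the
// active verb and flag state instead of using default rendering.
type hexInt int

func (h hexInt) Format(fs fmt.State, verb rune) {
	switch {
	case verb == 'v' && fs.Flag('#'):
		fmt.Fprintf(fs, "hexInt(%#x)", int(h)) // %#v: type-annotated form
	case verb == 'v':
		fmt.Fprintf(fs, "%#x", int(h)) // plain %v
	default:
		fmt.Fprintf(fs, "%%!%c(hexInt=%d)", verb, int(h))
	}
}

func main() {
	fmt.Printf("%v\n", hexInt(255))  // 0xff
	fmt.Printf("%#v\n", hexInt(255)) // hexInt(0xff)
}
```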
- v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max int64. - v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max int. 
- v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") -} - -func addUintFormatterTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max uint32. 
- v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max uint64. - v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") -} - -func addBoolFormatterTests() { - // Boolean true. 
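The &pv cases assert the notation for double indirection: one * per level inside the angle brackets, with %+v chaining the two addresses via ->. In isolation (addresses illustrative only):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := uint8(255)
	pv := &v
	ppv := &pv
	fmt.Println(spew.Sprintf("%v", ppv))  // <**>255
	fmt.Println(spew.Sprintf("%+v", ppv)) // <**>(0xc00001c040->0xc00001c038)255 -- addresses vary
}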
- v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFloatFormatterTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard float64. 
- v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addComplexFormatterTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addArrayFormatterTests() { - // Array containing standard ints. 
- v := [3]int{1, 2, 3} - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[3]int" - vs := "[1 2 3]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Array containing type with custom formatter on pointer receiver only. - v2 := [3]pstringer{"1", "2", "3"} - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[3]spew_test.pstringer" - v2sp := "[stringer 1 stringer 2 stringer 3]" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "[1 2 3]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2sp) - addFormatterTest("%v", &pv2, "<**>"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Array containing interfaces. - v3 := [3]interface{}{"one", int(2), uint(3)} - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "[one 2 3]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addSliceFormatterTests() { - // Slice containing standard float32 values. 
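The spew.UnsafeDisabled branches above exist because pstringer implements fmt.Stringer on its pointer receiver only: array and slice elements are addressable, so spew can normally take their addresses and invoke String, but under -tags safe it cannot and falls back to the raw values. Plain fmt shows the underlying method-set rule; this is standard Go semantics, independent of spew:

package main

import "fmt"

type pstringer string

// String is defined on the pointer receiver only, so only *pstringer
// satisfies fmt.Stringer.
func (s *pstringer) String() string { return "stringer " + string(*s) }

func main() {
	v := pstringer("1")
	fmt.Println(v)  // 1           -- the value copy in the interface lacks String
	fmt.Println(&v) // stringer 1  -- the pointer carries the method
}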
- v := []float32{3.14, 6.28, 12.56} - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[]float32" - vs := "[3.14 6.28 12.56]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Slice containing type with custom formatter on pointer receiver only. - v2 := []pstringer{"1", "2", "3"} - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[]spew_test.pstringer" - v2s := "[stringer 1 stringer 2 stringer 3]" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Slice containing interfaces. - v3 := []interface{}{"one", int(2), uint(3), nil} - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "[one 2 3 ]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + - ")]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Nil slice. 
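The nil-slice block below, like the nv cases throughout, pins down how spew renders nil: nil pointers, slices, and interfaces print as <nil>, which also separates a nil slice from a merely empty one. A sketch, with Dump output shapes per spew's documented conventions:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	spew.Dump([]int(nil)) // ([]int) <nil>
	spew.Dump([]int{})    // ([]int) {
	                      // }
}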
- var v4 []int - nv4 := (*[]int)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]int" - v4s := "" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStringFormatterTests() { - // Standard string. - v := "test" - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "test" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addInterfaceFormatterTests() { - // Nil interface. - var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Sub-interface. 
- v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addMapFormatterTests() { - // Map with string keys and int vals. - v := map[string]int{"one": 1, "two": 2} - nilMap := map[string]int(nil) - nv := (*map[string]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "map[string]int" - vs := "map[one:1 two:2]" - vs2 := "map[two:2 one:1]" - addFormatterTest("%v", v, vs, vs2) - addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, - "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) - addFormatterTest("%#v", nilMap, "("+vt+")"+"") - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, - "(*"+vt+")("+vAddr+")"+vs2) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, - "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%#+v", nilMap, "("+vt+")"+"") - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Map with custom formatter type on pointer receiver only keys and vals. - v2 := map[pstringer]pstringer{"one": "1"} - nv2 := (*map[pstringer]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "map[spew_test.pstringer]spew_test.pstringer" - v2s := "map[stringer one:stringer 1]" - if spew.UnsafeDisabled { - v2s = "map[one:1]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Map with interface keys and values. 
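The map cases above supply two acceptable wants (vs and vs2) because Go map iteration order is unspecified. The alternative is to make the output deterministic, which spew supports via ConfigState.SortKeys, exactly as TestPrintSortedKeys exercises later:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := spew.ConfigState{SortKeys: true}
	// With SortKeys the rendering is stable regardless of iteration order.
	fmt.Println(cfg.Sprint(map[string]int{"two": 2, "one": 1})) // map[one:1 two:2]
}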
- v3 := map[interface{}]interface{}{"one": 1} - nv3 := (*map[interface{}]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "map[interface {}]interface {}" - v3t1 := "string" - v3t2 := "int" - v3s := "map[one:1]" - v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Map with nil interface value - v4 := map[string]interface{}{"nil": nil} - nv4 := (*map[string]interface{})(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "map[string]interface {}" - v4t1 := "interface {}" - v4s := "map[nil:]" - v4s2 := "map[nil:(" + v4t1 + ")]" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStructFormatterTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{127 255}" - vs2 := "{a:127 b:255}" - vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs3) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs3) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{{127 255} true}" - v2s2 := "{s1:{a:127 b:255} b:true}" - v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + - v2t5 + ")true}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s2) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{stringer test stringer test2}" - v3sp := v3s - v3s2 := "{s:stringer test S:stringer test2}" - v3s2p := v3s2 - v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" - v3s3p := v3s3 - if spew.UnsafeDisabled { - v3s = "{test test2}" - v3sp = "{test stringer test2}" - v3s2 = "{s:test S:test2}" - v3s2p = "{s:test S:stringer test2}" - v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" - v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" - } - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3sp) - addFormatterTest("%v", &pv3, "<**>"+v3sp) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s2) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Struct that contains embedded struct and field to same struct. 
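For structs the same verb ladder applies recursively: %v prints bare values, %+v adds field names, and %#v prefixes every field with its type, as the s1/s2 expectations above spell out. In miniature (type names print under the defining package, main here rather than spew_test):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type s1 struct {
	a int8
	b uint8
}

func main() {
	v := s1{127, 255}
	fmt.Println(spew.Sprintf("%v", v))  // {127 255}
	fmt.Println(spew.Sprintf("%+v", v)) // {a:127 b:255}
	fmt.Println(spew.Sprintf("%#v", v)) // (main.s1){a:(int8)127 b:(uint8)255}
}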
- e := embed{"embedstr"} - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{<*>{embedstr} <*>{embedstr}}" - v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + - "){a:embedstr}}" - v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + - "){a:(" + v4t3 + ")embedstr}}" - v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + - ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s2) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addUintptrFormatterTests() { - // Null pointer. - v := uintptr(0) - nv := (*uintptr)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addUnsafePointerFormatterTests() { - // Null pointer. 
- v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addChanFormatterTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFuncFormatterTests() { - // Function with no params and no returns. 
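uintptr, unsafe.Pointer, and channel values have no contents to walk, so the tests above expect exactly what fmt's %p prints for the same address; functions, covered next, follow the same rule. A sketch:

package main

import (
	"fmt"
	"unsafe"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	i := 1
	p := unsafe.Pointer(&i)
	// spew's %v for an unsafe.Pointer matches fmt's %p for the same address.
	fmt.Println(spew.Sprintf("%v", p) == fmt.Sprintf("%p", &i)) // true

	ch := make(chan int)
	fmt.Println(spew.Sprintf("%#v", ch)) // (chan int)0x... -- address varies
}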
- v := addIntFormatterTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Function with param and no returns. - v2 := TestFormatter - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addCircularFormatterTests() { - // Struct that is circular through self referencing. 
- type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{<*>{<*>}}" - vs2 := "{<*>}" - vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" - vs4 := "{c:<*>(" + vAddr + ")}" - vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" - vs6 := "{c:(*" + vt + ")}" - vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + - ")}}" - vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs2) - addFormatterTest("%+v", v, vs3) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) - addFormatterTest("%#v", v, "("+vt+")"+vs5) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) - addFormatterTest("%#+v", v, "("+vt+")"+vs7) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) - - // Structs that are circular through cross referencing. - v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{<*>{<*>{<*>}}}" - v2s2 := "{<*>{<*>}}" - v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + - ts2Addr + ")}}}" - v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" - v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + - ")}}}" - v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" - v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + - ")}}}" - v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + ")}}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s2) - addFormatterTest("%v", &pv2, "<**>"+v2s2) - addFormatterTest("%+v", v2, v2s3) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) - - // Structs that are indirectly circular. 
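The vs/vs2 pairs above encode spew's cycle handling: once a pointer has been displayed on the current branch, spew stops recursing instead of looping forever, so the top-level value shows one full round and a pointer one round less. A sketch, assuming only that the dump terminates (the exact cycle marker is spew's own):

package main

import "github.com/davecgh/go-spew/spew"

type circular struct {
	c *circular
}

func main() {
	v := circular{}
	v.c = &v // self reference
	// Terminates: spew tracks pointers already shown on this branch.
	spew.Dump(v)
}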
- v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{<*>{<*>{<*>{<*>}}}}" - v3s2 := "{<*>{<*>{<*>}}}" - v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" - v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + ")}}}" - v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - "){ps2:(*" + v3t2 + ")}}}}" - v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - ")}}}" - v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + - ")(" + tic2Addr + ")}}}}" - v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s2) - addFormatterTest("%v", &pv3, "<**>"+v3s2) - addFormatterTest("%+v", v3, v3s3) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) - addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) -} - -func addPanicFormatterTests() { - // Type that panics in its Stringer interface. - v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addErrorFormatterTests() { - // Type that has a custom Error interface. 
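spew invokes Stringer and error methods inside a recover, so a panicking String does not abort the dump; the panic text is folded into the output, which is what the panicer expectations below assert. Mirroring the package's panicer helper:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type panicer int

func (p panicer) String() string { panic("test panic") }

func main() {
	fmt.Println(spew.Sprintf("%v", panicer(127))) // (PANIC=test panic)127
}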
- v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addPassthroughFormatterTests() { - // %x passthrough with uint. - v := uint(4294967295) - pv := &v - vAddr := fmt.Sprintf("%x", pv) - pvAddr := fmt.Sprintf("%x", &pv) - vs := "ffffffff" - addFormatterTest("%x", v, vs) - addFormatterTest("%x", pv, vAddr) - addFormatterTest("%x", &pv, pvAddr) - - // %#x passthrough with uint. - v2 := int(2147483647) - pv2 := &v2 - v2Addr := fmt.Sprintf("%#x", pv2) - pv2Addr := fmt.Sprintf("%#x", &pv2) - v2s := "0x7fffffff" - addFormatterTest("%#x", v2, v2s) - addFormatterTest("%#x", pv2, v2Addr) - addFormatterTest("%#x", &pv2, pv2Addr) - - // %f passthrough with precision. - addFormatterTest("%.2f", 3.1415, "3.14") - addFormatterTest("%.3f", 3.1415, "3.142") - addFormatterTest("%.4f", 3.1415, "3.1415") - - // %f passthrough with width and precision. - addFormatterTest("%5.2f", 3.1415, " 3.14") - addFormatterTest("%6.3f", 3.1415, " 3.142") - addFormatterTest("%7.4f", 3.1415, " 3.1415") - - // %d passthrough with width. - addFormatterTest("%3d", 127, "127") - addFormatterTest("%4d", 127, " 127") - addFormatterTest("%5d", 127, " 127") - - // %q passthrough with string. - addFormatterTest("%q", "test", "\"test\"") -} - -// TestFormatter executes all of the tests described by formatterTests. -func TestFormatter(t *testing.T) { - // Setup tests. 
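The passthrough cases above pin down the division of labor: spew only customizes the %v family and hands any other verb, width, precision, and flags included, straight to the standard fmt package:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	fmt.Println(spew.Sprintf("%x", uint(4294967295))) // ffffffff
	fmt.Println(spew.Sprintf("%5.2f", 3.1415))        //  3.14
	fmt.Println(spew.Sprintf("%q", "test"))           // "test"
}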
- addIntFormatterTests() - addUintFormatterTests() - addBoolFormatterTests() - addFloatFormatterTests() - addComplexFormatterTests() - addArrayFormatterTests() - addSliceFormatterTests() - addStringFormatterTests() - addInterfaceFormatterTests() - addMapFormatterTests() - addStructFormatterTests() - addUintptrFormatterTests() - addUnsafePointerFormatterTests() - addChanFormatterTests() - addFuncFormatterTests() - addCircularFormatterTests() - addPanicFormatterTests() - addErrorFormatterTests() - addPassthroughFormatterTests() - - t.Logf("Running %d tests", len(formatterTests)) - for i, test := range formatterTests { - buf := new(bytes.Buffer) - spew.Fprintf(buf, test.format, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, - stringizeWants(test.wants)) - continue - } - } -} - -type testStruct struct { - x int -} - -func (ts testStruct) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -type testStructP struct { - x int -} - -func (ts *testStructP) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -func TestPrintSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "map[1:1 2:2 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if spew.UnsafeDisabled { - expected = "map[1:1 2:2 3:3]" - } - if s != expected { - t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) - } - - if !spew.UnsafeDisabled { - s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) - } - } - - s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "map[error: 1:1 error: 2:2 error: 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go deleted file mode 100644 index 20a9cfefc6..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-This test file is part of the spew package rather than the spew_test
-package because it needs access to internals to properly test certain cases
-which are not possible via the public interface since they should never happen.
-*/
-
-package spew
-
-import (
-	"bytes"
-	"reflect"
-	"testing"
-)
-
-// dummyFmtState implements a fake fmt.State to use for testing invalid
-// reflect.Value handling.  This is necessary because the fmt package catches
-// invalid values before invoking the formatter on them.
-type dummyFmtState struct {
-	bytes.Buffer
-}
-
-// Flag reports only the '+' flag as set, matching the formatter path the
-// tests below exercise.
-func (dfs *dummyFmtState) Flag(f int) bool {
-	return f == int('+')
-}
-
-func (dfs *dummyFmtState) Precision() (int, bool) {
-	return 0, false
-}
-
-func (dfs *dummyFmtState) Width() (int, bool) {
-	return 0, false
-}
-
-// TestInvalidReflectValue ensures the dump and formatter code handles an
-// invalid reflect value properly.  This needs access to internal state since it
-// should never happen in real code and therefore can't be tested via the public
-// API.
-func TestInvalidReflectValue(t *testing.T) {
-	i := 1
-
-	// Dump invalid reflect value.
-	v := new(reflect.Value)
-	buf := new(bytes.Buffer)
-	d := dumpState{w: buf, cs: &Config}
-	d.dump(*v)
-	s := buf.String()
-	want := "<invalid>"
-	if s != want {
-		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
-	}
-	i++
-
-	// Formatter invalid reflect value.
-	buf2 := new(dummyFmtState)
-	f := formatState{value: *v, cs: &Config, fs: buf2}
-	f.format(*v)
-	s = buf2.String()
-	want = "<invalid>"
-	if s != want {
-		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
-	}
-}
-
-// SortValues makes the internal sortValues function available to the test
-// package.
-func SortValues(values []reflect.Value, cs *ConfigState) {
-	sortValues(values, cs)
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
deleted file mode 100644
index a0c612ec3d..0000000000
--- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
-
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line.
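For reference, the constraint this note describes, in both the legacy spelling kept in the file and the //go:build form that Go 1.17 and later also accepts:

//go:build !js && !appengine && !safe && !disableunsafe
// +build !js,!appengine,!safe,!disableunsafe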
The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe - -/* -This test file is part of the spew package rather than than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" - "unsafe" -) - -// changeKind uses unsafe to intentionally change the kind of a reflect.Value to -// the maximum kind value which does not exist. This is needed to test the -// fallback code which punts to the standard fmt library for new types that -// might get added to the language. -func changeKind(v *reflect.Value, readOnly bool) { - rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) - *rvf = *rvf | ((1< - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// spewFunc is used to identify which public function of the spew package or -// ConfigState a test applies to. -type spewFunc int - -const ( - fCSFdump spewFunc = iota - fCSFprint - fCSFprintf - fCSFprintln - fCSPrint - fCSPrintln - fCSSdump - fCSSprint - fCSSprintf - fCSSprintln - fCSErrorf - fCSNewFormatter - fErrorf - fFprint - fFprintln - fPrint - fPrintln - fSdump - fSprint - fSprintf - fSprintln -) - -// Map of spewFunc values to names for pretty printing. -var spewFuncStrings = map[spewFunc]string{ - fCSFdump: "ConfigState.Fdump", - fCSFprint: "ConfigState.Fprint", - fCSFprintf: "ConfigState.Fprintf", - fCSFprintln: "ConfigState.Fprintln", - fCSSdump: "ConfigState.Sdump", - fCSPrint: "ConfigState.Print", - fCSPrintln: "ConfigState.Println", - fCSSprint: "ConfigState.Sprint", - fCSSprintf: "ConfigState.Sprintf", - fCSSprintln: "ConfigState.Sprintln", - fCSErrorf: "ConfigState.Errorf", - fCSNewFormatter: "ConfigState.NewFormatter", - fErrorf: "spew.Errorf", - fFprint: "spew.Fprint", - fFprintln: "spew.Fprintln", - fPrint: "spew.Print", - fPrintln: "spew.Println", - fSdump: "spew.Sdump", - fSprint: "spew.Sprint", - fSprintf: "spew.Sprintf", - fSprintln: "spew.Sprintln", -} - -func (f spewFunc) String() string { - if s, ok := spewFuncStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) -} - -// spewTest is used to describe a test to be performed against the public -// functions of the spew package or ConfigState. -type spewTest struct { - cs *spew.ConfigState - f spewFunc - format string - in interface{} - want string -} - -// spewTests houses the tests to be performed against the public functions of -// the spew package and ConfigState. 
-// -// These tests are only intended to ensure the public functions are exercised -// and are intentionally not exhaustive of types. The exhaustive type -// tests are handled in the dump and format tests. -var spewTests []spewTest - -// redirStdout is a helper function to return the standard output from f as a -// byte slice. -func redirStdout(f func()) ([]byte, error) { - tempFile, err := ioutil.TempFile("", "ss-test") - if err != nil { - return nil, err - } - fileName := tempFile.Name() - defer os.Remove(fileName) // Ignore error - - origStdout := os.Stdout - os.Stdout = tempFile - f() - os.Stdout = origStdout - tempFile.Close() - - return ioutil.ReadFile(fileName) -} - -func initSpewTests() { - // Config states with various settings. - scsDefault := spew.NewDefaultConfig() - scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} - scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} - scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} - scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} - scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} - scsNoCap := &spew.ConfigState{DisableCapacities: true} - - // Variables for tests on types which implement Stringer interface with and - // without a pointer receiver. - ts := stringer("test") - tps := pstringer("test") - - type ptrTester struct { - s *struct{} - } - tptr := &ptrTester{s: &struct{}{}} - - // depthTester is used to test max depth handling for structs, array, slices - // and maps. - type depthTester struct { - ic indirCir1 - arr [1]string - slice []string - m map[string]int - } - dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, - map[string]int{"one": 1}} - - // Variable for tests on types which implement error interface. 
- te := customError(10) - - spewTests = []spewTest{ - {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, - {scsDefault, fCSFprint, "", int16(32767), "32767"}, - {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, - {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, - {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, - {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, - {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, - {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, - {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, - {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, - {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, - {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, - {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, - {scsDefault, fFprint, "", float32(3.14), "3.14"}, - {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, - {scsDefault, fPrint, "", true, "true"}, - {scsDefault, fPrintln, "", false, "false\n"}, - {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, - {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, - {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, - {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, - {scsNoMethods, fCSFprint, "", ts, "test"}, - {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, - {scsNoMethods, fCSFprint, "", tps, "test"}, - {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, - {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, - {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, - {scsNoPmethods, fCSFprint, "", tps, "test"}, - {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, - {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, - {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + - " ic: (spew_test.indirCir1) {\n \n },\n" + - " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + - " slice: ([]string) (len=1 cap=1) {\n \n },\n" + - " m: (map[string]int) (len=1) {\n \n }\n}\n"}, - {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, - {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + - "(len=4) (stringer test) \"test\"\n"}, - {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, - {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + - "(error: 10) 10\n"}, - {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, - {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, - {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, - {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, - } -} - -// TestSpew executes all of the tests described by spewTests. 
-func TestSpew(t *testing.T) { - initSpewTests() - - t.Logf("Running %d tests", len(spewTests)) - for i, test := range spewTests { - buf := new(bytes.Buffer) - switch test.f { - case fCSFdump: - test.cs.Fdump(buf, test.in) - - case fCSFprint: - test.cs.Fprint(buf, test.in) - - case fCSFprintf: - test.cs.Fprintf(buf, test.format, test.in) - - case fCSFprintln: - test.cs.Fprintln(buf, test.in) - - case fCSPrint: - b, err := redirStdout(func() { test.cs.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSPrintln: - b, err := redirStdout(func() { test.cs.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSSdump: - str := test.cs.Sdump(test.in) - buf.WriteString(str) - - case fCSSprint: - str := test.cs.Sprint(test.in) - buf.WriteString(str) - - case fCSSprintf: - str := test.cs.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fCSSprintln: - str := test.cs.Sprintln(test.in) - buf.WriteString(str) - - case fCSErrorf: - err := test.cs.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fCSNewFormatter: - fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) - - case fErrorf: - err := spew.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fFprint: - spew.Fprint(buf, test.in) - - case fFprintln: - spew.Fprintln(buf, test.in) - - case fPrint: - b, err := redirStdout(func() { spew.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fPrintln: - b, err := redirStdout(func() { spew.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fSdump: - str := spew.Sdump(test.in) - buf.WriteString(str) - - case fSprint: - str := spew.Sprint(test.in) - buf.WriteString(str) - - case fSprintf: - str := spew.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fSprintln: - str := spew.Sprintln(test.in) - buf.WriteString(str) - - default: - t.Errorf("%v #%d unrecognized function", test.f, i) - continue - } - s := buf.String() - if test.want != s { - t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) - continue - } - } -} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt deleted file mode 100644 index 2cd087a2a1..0000000000 --- a/vendor/github.com/davecgh/go-spew/test_coverage.txt +++ /dev/null @@ -1,61 +0,0 @@ - -github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) -github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) -github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) -github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) -github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) -github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) -github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) -github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) -github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) -github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) 
-github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) -github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) -github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) -github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) -github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) -github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) -github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) -github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) -github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% 
(1/1) -github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) -github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) - diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md index 25aec486c6..d358d881b8 100644 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -1,11 +1,15 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +# jwt-go [![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm now working on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. -**NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail. +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. ## What the heck is a JWT?
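The `alg` check called out in the security notice above belongs in the `Keyfunc` handed to `Parse`. Here is a minimal sketch of that pattern; it mirrors the `ExampleParse_hmac` code removed from the vendored tests further down. `hmacSecret` and the token string are placeholders, and the import path is written in the vendored `github.com` form used throughout this diff (upstream lives at `github.com/dgrijalva/jwt-go`).

```go
package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

// hmacSecret stands in for the application's shared HMAC key.
var hmacSecret = []byte("my_secret_key")

// parseChecked verifies the token only after confirming the header's alg
// belongs to the HMAC family this key is intended for.
func parseChecked(tokenString string) (*jwt.Token, error) {
	return jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return hmacSecret, nil
	})
}

func main() {
	// Placeholder token; a real token signed with hmacSecret parses cleanly.
	if _, err := parseChecked("<jwt token string>"); err != nil {
		fmt.Println("parse failed:", err)
	}
}
```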
@@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -68,6 +75,14 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones (see the sketch below): + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + ### JWT and OAuth It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
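As the new "Signing Methods and Key Types" section above notes, each method demands a specific key type. A minimal sketch with the RSA methods, which pair `*rsa.PrivateKey` for signing with `*rsa.PublicKey` for validation; `app.rsa` is a placeholder PEM path borrowed from the comments in the removed example files below, and the import path again follows the vendored form.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	// Load and parse the signing key; app.rsa is a placeholder
	// (e.g. generated with: openssl genrsa -out app.rsa 2048).
	pemBytes, err := ioutil.ReadFile("app.rsa")
	if err != nil {
		log.Fatal(err)
	}
	signKey, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}

	// RS256 requires *rsa.PrivateKey here; a []byte or other key type
	// is rejected with an invalid-key error rather than mis-signing.
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"foo": "bar"})
	signed, err := token.SignedString(signKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed)
}
```

The same strictness is what the `hmac.go` hunk below tightens: signing with a non-`[]byte` key now returns `ErrInvalidKeyType` instead of the generic `ErrInvalidKey`.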
@@ -77,7 +92,7 @@ Without going too far down the rabbit hole, here's a description of the interact * OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. * OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. * Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - + ## More Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md index c21551f6bb..6370298313 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -1,5 +1,12 @@ ## `jwt-go` Version History +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation (see the sketch below) +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
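`ParseUnverified`, added in 3.2.0 and visible in the `parser.go` hunk further down, splits decoding from signature validation. A minimal sketch with a hypothetical `peekIssuer` helper; the claims it returns are only trustworthy when the signature has already been verified earlier in the stack. The sample token is the RFC draft example that also appears in the removed HMAC tests, whose payload carries `iss` = `joe`.

```go
package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

// peekIssuer decodes the token's claims without checking the signature
// and returns the "iss" claim, if present.
func peekIssuer(tokenString string) (string, error) {
	token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		return "", err
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		return "", fmt.Errorf("unexpected claims type %T", token.Claims)
	}
	iss, _ := claims["iss"].(string)
	return iss, nil
}

func main() {
	sample := "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
	iss, err := peekIssuer(sample)
	fmt.Println(iss, err) // joe <nil>
}
```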
+ #### 3.1.0 * Improvements to `jwt` command line tool diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go index 2f59a22236..f977381240 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -14,6 +14,7 @@ var ( ) // Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string Hash crypto.Hash diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go deleted file mode 100644 index 753047b1ec..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt_test - -import ( - "crypto/ecdsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var ecdsaTestData = []struct { - name string - keys map[string]string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic ES256", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", - "ES256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES384", - map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", - "ES384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES512", - map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", - "ES512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic ES256 invalid: foo => bar", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", - "ES256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestECDSAVerify(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - - key, _ := ioutil.ReadFile(data.keys["public"]) - - var ecdsaKey *ecdsa.PublicKey - if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA public key: %v", err) - } - - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestECDSASign(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - key, _ := ioutil.ReadFile(data.keys["private"]) - - var ecdsaKey *ecdsa.PrivateKey - if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA 
private key: %v", err) - } - - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/example_test.go b/vendor/github.com/dgrijalva/jwt-go/example_test.go deleted file mode 100644 index ae8b788a0b..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/example_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "time" -) - -// Example (atypical) using the StandardClaims type by itself to parse a token. -// The StandardClaims type is designed to be embedded into your custom types -// to provide standard validation features. You can use it alone, but there's -// no way to retrieve other fields after parsing. -// See the CustomClaimsType example for intended usage. -func ExampleNewWithClaims_standardClaims() { - mySigningKey := []byte("AllYourBase") - - // Create the Claims - claims := &jwt.StandardClaims{ - ExpiresAt: 15000, - Issuer: "test", - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - ss, err := token.SignedString(mySigningKey) - fmt.Printf("%v %v", ss, err) - //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.QsODzZu3lUZMVdhbO76u3Jv02iYCvEHcYVUI1kOWEU0 -} - -// Example creating a token using a custom claims type. The StandardClaim is embedded -// in the custom type to allow for easy encoding, parsing and validation of standard claims. -func ExampleNewWithClaims_customClaimsType() { - mySigningKey := []byte("AllYourBase") - - type MyCustomClaims struct { - Foo string `json:"foo"` - jwt.StandardClaims - } - - // Create the Claims - claims := MyCustomClaims{ - "bar", - jwt.StandardClaims{ - ExpiresAt: 15000, - Issuer: "test", - }, - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - ss, err := token.SignedString(mySigningKey) - fmt.Printf("%v %v", ss, err) - //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c -} - -// Example creating a token using a custom claims type. The StandardClaim is embedded -// in the custom type to allow for easy encoding, parsing and validation of standard claims. -func ExampleParseWithClaims_customClaimsType() { - tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" - - type MyCustomClaims struct { - Foo string `json:"foo"` - jwt.StandardClaims - } - - // sample token is expired. override time so it parses as valid - at(time.Unix(0, 0), func() { - token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) { - return []byte("AllYourBase"), nil - }) - - if claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid { - fmt.Printf("%v %v", claims.Foo, claims.StandardClaims.ExpiresAt) - } else { - fmt.Println(err) - } - }) - - // Output: bar 15000 -} - -// Override time value for tests. Restore default value after. 
-func at(t time.Time, f func()) { - jwt.TimeFunc = func() time.Time { - return t - } - f() - jwt.TimeFunc = time.Now -} - -// An example of parsing the error types using bitfield checks -func ExampleParse_errorChecking() { - // Token from another example. This token is expired - var tokenString = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" - - token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - return []byte("AllYourBase"), nil - }) - - if token.Valid { - fmt.Println("You look nice today") - } else if ve, ok := err.(*jwt.ValidationError); ok { - if ve.Errors&jwt.ValidationErrorMalformed != 0 { - fmt.Println("That's not even a token") - } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { - // Token is either expired or not active yet - fmt.Println("Timing is everything") - } else { - fmt.Println("Couldn't handle this token:", err) - } - } else { - fmt.Println("Couldn't handle this token:", err) - } - - // Output: Timing is everything -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go index c229919254..addbe5d401 100644 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -7,6 +7,7 @@ import ( ) // Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string Hash crypto.Hash @@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, return EncodeSegment(hasher.Sum(nil)), nil } - return "", ErrInvalidKey + return "", ErrInvalidKeyType } diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go deleted file mode 100644 index 0027831474..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "time" -) - -// For HMAC signing method, the key can be any []byte. It is recommended to generate -// a key using crypto/rand or something equivalent. You need the same key for signing -// and validating. -var hmacSampleSecret []byte - -func init() { - // Load sample key data - if keyData, e := ioutil.ReadFile("test/hmacTestKey"); e == nil { - hmacSampleSecret = keyData - } else { - panic(e) - } -} - -// Example creating, signing, and encoding a JWT token using the HMAC signing method -func ExampleNew_hmac() { - // Create a new token object, specifying signing method and the claims - // you would like it to contain. 
- token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "foo": "bar", - "nbf": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(), - }) - - // Sign and get the complete encoded token as a string using the secret - tokenString, err := token.SignedString(hmacSampleSecret) - - fmt.Println(tokenString, err) - // Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU -} - -// Example parsing and validating a token using the HMAC signing method -func ExampleParse_hmac() { - // sample token string taken from the New example - tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU" - - // Parse takes the token string and a function for looking up the key. The latter is especially - // useful if you use multiple keys for your application. The standard is to use 'kid' in the - // head of the token to identify which key to use, but the parsed token (head and claims) is provided - // to the callback, providing flexibility. - token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // hmacSampleSecret is a []byte containing your secret, e.g. []byte("my_secret_key") - return hmacSampleSecret, nil - }) - - if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { - fmt.Println(claims["foo"], claims["nbf"]) - } else { - fmt.Println(err) - } - - // Output: bar 1.4444784e+09 -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_test.go deleted file mode 100644 index c7e114f4f9..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var hmacTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "web sample", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS384", - "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", - "HS384", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS512", - "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", - "HS512", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "web sample: invalid", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - false, - 
}, -} - -// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 -var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") - -func TestHMACVerify(t *testing.T) { - for _, data := range hmacTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestHMACSign(t *testing.T) { - for _, data := range hmacTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func BenchmarkHS256Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) -} - -func BenchmarkHS384Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey) -} - -func BenchmarkHS512Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/http_example_test.go b/vendor/github.com/dgrijalva/jwt-go/http_example_test.go deleted file mode 100644 index 82e9c50a41..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/http_example_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package jwt_test - -// Example HTTP auth using asymmetric crypto/RSA keys -// This is based on a (now outdated) example at https://gist.github.com/cryptix/45c33ecf0ae54828e63b - -import ( - "bytes" - "crypto/rsa" - "fmt" - "github.com/dgrijalva/jwt-go" - "github.com/dgrijalva/jwt-go/request" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// location of the files used for signing and verification -const ( - privKeyPath = "test/sample_key" // openssl genrsa -out app.rsa keysize - pubKeyPath = "test/sample_key.pub" // openssl rsa -in app.rsa -pubout > app.rsa.pub -) - -var ( - verifyKey *rsa.PublicKey - signKey *rsa.PrivateKey - serverPort int - // storing sample username/password pairs - // don't do this on a real server - users = map[string]string{ - "test": "known", - } -) - -// read the key files before starting http handlers -func init() { - signBytes, err := ioutil.ReadFile(privKeyPath) - fatal(err) - - signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - fatal(err) - - verifyBytes, err := ioutil.ReadFile(pubKeyPath) - fatal(err) - - verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - fatal(err) - - http.HandleFunc("/authenticate", authHandler) - http.HandleFunc("/restricted", restrictedHandler) - - // Setup listener - listener, err := net.ListenTCP("tcp", &net.TCPAddr{}) - serverPort = listener.Addr().(*net.TCPAddr).Port - - log.Println("Listening...") - go func() { - fatal(http.Serve(listener, nil)) - }() -} - -var start func() - -func fatal(err error) { - if err != nil { - log.Fatal(err) - } -} - -// Define some custom types were going to use within our tokens -type CustomerInfo struct { - Name string - Kind string -} - -type CustomClaimsExample struct { - *jwt.StandardClaims - TokenType string - CustomerInfo -} - -func 
Example_getTokenViaHTTP() { - // See func authHandler for an example auth handler that produces a token - res, err := http.PostForm(fmt.Sprintf("http://localhost:%v/authenticate", serverPort), url.Values{ - "user": {"test"}, - "pass": {"known"}, - }) - if err != nil { - fatal(err) - } - - if res.StatusCode != 200 { - fmt.Println("Unexpected status code", res.StatusCode) - } - - // Read the token out of the response body - buf := new(bytes.Buffer) - io.Copy(buf, res.Body) - res.Body.Close() - tokenString := strings.TrimSpace(buf.String()) - - // Parse the token - token, err := jwt.ParseWithClaims(tokenString, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { - // since we only use the one private key to sign the tokens, - // we also only use its public counter part to verify - return verifyKey, nil - }) - fatal(err) - - claims := token.Claims.(*CustomClaimsExample) - fmt.Println(claims.CustomerInfo.Name) - - //Output: test -} - -func Example_useTokenViaHTTP() { - - // Make a sample token - // In a real world situation, this token will have been acquired from - // some other API call (see Example_getTokenViaHTTP) - token, err := createToken("foo") - fatal(err) - - // Make request. See func restrictedHandler for example request processor - req, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:%v/restricted", serverPort), nil) - fatal(err) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", token)) - res, err := http.DefaultClient.Do(req) - fatal(err) - - // Read the response body - buf := new(bytes.Buffer) - io.Copy(buf, res.Body) - res.Body.Close() - fmt.Println(buf.String()) - - // Output: Welcome, foo -} - -func createToken(user string) (string, error) { - // create a signer for rsa 256 - t := jwt.New(jwt.GetSigningMethod("RS256")) - - // set our claims - t.Claims = &CustomClaimsExample{ - &jwt.StandardClaims{ - // set the expire time - // see http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-20#section-4.1.4 - ExpiresAt: time.Now().Add(time.Minute * 1).Unix(), - }, - "level1", - CustomerInfo{user, "human"}, - } - - // Creat token string - return t.SignedString(signKey) -} - -// reads the form values, checks them and creates the token -func authHandler(w http.ResponseWriter, r *http.Request) { - // make sure its post - if r.Method != "POST" { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintln(w, "No POST", r.Method) - return - } - - user := r.FormValue("user") - pass := r.FormValue("pass") - - log.Printf("Authenticate: user[%s] pass[%s]\n", user, pass) - - // check values - if user != "test" || pass != "known" { - w.WriteHeader(http.StatusForbidden) - fmt.Fprintln(w, "Wrong info") - return - } - - tokenString, err := createToken(user) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintln(w, "Sorry, error while Signing Token!") - log.Printf("Token Signing error: %v\n", err) - return - } - - w.Header().Set("Content-Type", "application/jwt") - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tokenString) -} - -// only accessible with a valid token -func restrictedHandler(w http.ResponseWriter, r *http.Request) { - // Get token from request - token, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { - // since we only use the one private key to sign the tokens, - // we also only use its public counter part to verify - return verifyKey, nil - }) - - // If the token is missing or invalid, return error - if err != nil { - 
w.WriteHeader(http.StatusUnauthorized) - fmt.Fprintln(w, "Invalid token:", err) - return - } - - // Token is valid - fmt.Fprintln(w, "Welcome,", token.Claims.(*CustomClaimsExample).Name) - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none_test.go b/vendor/github.com/dgrijalva/jwt-go/none_test.go deleted file mode 100644 index 29a69efef7..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/none_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "strings" - "testing" -) - -var noneTestData = []struct { - name string - tokenString string - alg string - key interface{} - claims map[string]interface{} - valid bool -}{ - { - "Basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic - no key", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - nil, - map[string]interface{}{"foo": "bar"}, - false, - }, - { - "Signed", - "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestNoneVerify(t *testing.T) { - for _, data := range noneTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], data.key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestNoneSign(t *testing.T) { - for _, data := range noneTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), data.key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go index 7bf1c4ea08..d6901d9adb 100644 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -21,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { } func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} 
- } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error + token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + return token, err } // Verify signing method is in the required set @@ -96,6 +50,9 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } if key, err = keyFunc(token); err != nil { // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } @@ -129,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser_test.go b/vendor/github.com/dgrijalva/jwt-go/parser_test.go deleted file mode 100644 index f8ad6f9083..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package jwt_test - -import ( - "crypto/rsa" - "encoding/json" - "fmt" - "reflect" - "testing" - "time" - - "github.com/dgrijalva/jwt-go" - "github.com/dgrijalva/jwt-go/test" -) - -var keyFuncError error = fmt.Errorf("error loading key") - -var ( - jwtTestDefaultKey *rsa.PublicKey - defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } - emptyKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } - errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, keyFuncError } - nilKeyFunc jwt.Keyfunc = nil -) - -func init() { - jwtTestDefaultKey = test.LoadRSAPublicKeyFromDisk("test/sample_key.pub") -} - -var jwtTestData = []struct { - name string - tokenString string - keyfunc jwt.Keyfunc - claims jwt.Claims - valid bool - errors uint32 - parser *jwt.Parser -}{ - { - "basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - true, - 0, - nil, - }, - { - "basic expired", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorExpired, - nil, - }, - { - "basic nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, - false, - jwt.ValidationErrorNotValidYet, - nil, - }, - { - "expired and nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - nil, - }, - { - "basic invalid", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic 
nokeyfunc", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - nilKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "basic nokey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - emptyKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic errorkey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - errorKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "invalid signing method", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - &jwt.Parser{ValidMethods: []string{"HS256"}}, - }, - { - "valid signing method", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - true, - 0, - &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, - }, - { - "JSON Number", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": json.Number("123.4")}, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "Standard Claims", - "", - defaultKeyFunc, - &jwt.StandardClaims{ - ExpiresAt: time.Now().Add(time.Second * 10).Unix(), - }, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic expired", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorExpired, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, - false, - jwt.ValidationErrorNotValidYet, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - expired and nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100)), "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "SkipClaimsValidation during token parsing", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, - true, - 0, - &jwt.Parser{UseJSONNumber: true, SkipClaimsValidation: true}, - }, -} - -func TestParser_Parse(t *testing.T) { - privateKey := test.LoadRSAPrivateKeyFromDisk("test/sample_key") - - // Iterate over test data set and run tests - for _, data := range jwtTestData { - // If 
the token string is blank, use helper function to generate string - if data.tokenString == "" { - data.tokenString = test.MakeSampleToken(data.claims, privateKey) - } - - // Parse the token - var token *jwt.Token - var err error - var parser = data.parser - if parser == nil { - parser = new(jwt.Parser) - } - // Figure out correct claims type - switch data.claims.(type) { - case jwt.MapClaims: - token, err = parser.ParseWithClaims(data.tokenString, jwt.MapClaims{}, data.keyfunc) - case *jwt.StandardClaims: - token, err = parser.ParseWithClaims(data.tokenString, &jwt.StandardClaims{}, data.keyfunc) - } - - // Verify result matches expectation - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err) - } - - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - - if (err == nil && !token.Valid) || (err != nil && token.Valid) { - t.Errorf("[%v] Inconsistent behavior between returned error and token.Valid", data.name) - } - - if data.errors != 0 { - if err == nil { - t.Errorf("[%v] Expecting error. Didn't get one.", data.name) - } else { - - ve := err.(*jwt.ValidationError) - // compare the bitfield part of the error - if e := ve.Errors; e != data.errors { - t.Errorf("[%v] Errors don't match expectation. %v != %v", data.name, e, data.errors) - } - - if err.Error() == keyFuncError.Error() && ve.Inner != keyFuncError { - t.Errorf("[%v] Inner error does not match expectation. %v != %v", data.name, ve.Inner, keyFuncError) - } - } - } - if data.valid && token.Signature == "" { - t.Errorf("[%v] Signature is left unpopulated after parsing", data.name) - } - } -} - -// Helper method for benchmarking various methods -func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) { - t := jwt.New(method) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if _, err := t.SignedString(key); err != nil { - b.Fatal(err) - } - } - }) - -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go index 0ae0b1984e..e4caf1ca4a 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -7,6 +7,7 @@ import ( ) // Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string Hash crypto.Hash @@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string { } // Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. +// For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface } // Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. +// For this signing method, must be an *rsa.PrivateKey structure. 
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey var ok bool diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go deleted file mode 100644 index 9045aaf349..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build go1.4 - -package jwt_test - -import ( - "crypto/rsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var rsaPSSTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic PS256", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", - "PS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS384", - "eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", - "PS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS512", - "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", - "PS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic PS256 invalid: foo => bar", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", - "PS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAPSSVerify(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key.pub") - var rsaPSSKey *rsa.PublicKey - if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA public key: %v", err) - } - - for _, data := range rsaPSSTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSAPSSSign(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key") - var rsaPSSKey *rsa.PrivateKey - if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != 
nil { - t.Errorf("Unable to parse RSA private key: %v", err) - } - - for _, data := range rsaPSSTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_test.go deleted file mode 100644 index 2e0f785361..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var rsaTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic RS256", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS384", - "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "RS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS512", - "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", - "RS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic invalid: foo => bar", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAVerify(t *testing.T) { - keyData, _ := ioutil.ReadFile("test/sample_key.pub") - key, _ := jwt.ParseRSAPublicKeyFromPEM(keyData) - - for _, data := range rsaTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func 
TestRSASign(t *testing.T) { - keyData, _ := ioutil.ReadFile("test/sample_key") - key, _ := jwt.ParseRSAPrivateKeyFromPEM(keyData) - - for _, data := range rsaTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) - if err != nil { - t.Errorf("[%v] Error while verifying key: %v", testData.name, err) - } -} - -func TestRSAWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", testData.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) - } -} - -func TestRSAKeyParsing(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - pubKey, _ := ioutil.ReadFile("test/sample_key.pub") - badKey := []byte("All your base are belong to key") - - // Test parsePrivateKey - if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { - t.Errorf("Failed to parse valid private key: %v", e) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { - t.Errorf("Parsed public key as valid private key: %v", k) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - - // Test parsePublicKey - if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { - t.Errorf("Failed to parse valid public key: %v", e) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { - t.Errorf("Parsed private key as valid public key: %v", k) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - -} - -func BenchmarkRS256Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) -} - -func BenchmarkRS384Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) -} - -func BenchmarkRS512Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) -}
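With these vendored tests deleted, nothing left in this tree shows how the RSA methods are meant to be driven, so the following is a minimal sketch of the same sign/parse round trip against the jwt-go public API. It uses the import path as vendored here; the key file paths are hypothetical placeholders for the `test/sample_key` fixtures above:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	// Load an RSA key pair from disk (placeholder paths).
	privPEM, _ := ioutil.ReadFile("sample_key")
	pubPEM, _ := ioutil.ReadFile("sample_key.pub")

	privKey, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
	if err != nil {
		panic(err)
	}
	// For an encrypted key, the rsa_utils.go hunk below adds:
	//   jwt.ParseRSAPrivateKeyFromPEMWithPassword(privPEM, "passphrase")

	// Sign: RS256 requires an *rsa.PrivateKey.
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"foo": "bar"})
	signed, err := token.SignedString(privKey)
	if err != nil {
		panic(err)
	}

	// Parse/verify: the keyfunc hands back the matching *rsa.PublicKey.
	pubKey, err := jwt.ParseRSAPublicKeyFromPEM(pubPEM)
	if err != nil {
		panic(err)
	}
	parsed, err := jwt.ParseWithClaims(signed, jwt.MapClaims{}, func(t *jwt.Token) (interface{}, error) {
		return pubKey, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil> on a successful round trip
}
```

The deleted tests above assert exactly this contract: `Sign` rejects anything but an `*rsa.PrivateKey`, and `Verify` rejects anything but the matching `*rsa.PublicKey`.

diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go index 213a90dbbf..a5ababf956 100644 --- 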
a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + // Parse PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 1c3ae0a773..0000000000 --- a/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. 
-*.sublime-project -*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d991060198..0000000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 252ff8aa2a..0000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. 
Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index c59828182a..0000000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). 
- -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. 
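For context, each such tag gates a whole file at compile time, the same mechanism as the `// +build go1.4` line in the deleted `rsa_pss_test.go` above. Below is a sketch of a hypothetical tag-gated file; the `include_oss` tag itself is real (the Dockerfile later in this diff sets `DOCKER_BUILDTAGS include_oss include_gcs`), but the package and its contents are illustrative only:

```go
// +build include_oss

// Package oss is a hypothetical storage driver that is compiled only
// when the build runs with the matching tag, e.g.:
//
//	DOCKER_BUILDTAGS="include_oss include_gcs" make binaries
package oss

// Register the driver only in tagged builds; untagged builds carry
// neither this code nor its dependencies.
func init() {
	// driver registration would go here
}
```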
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index e7b16b3c25..0000000000 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,108 +0,0 @@ -# Changelog - -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 1f1441beec..0000000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. 
copy the output of: - - `docker version` - - `docker info` - - `docker exec registry -version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. 
Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index ac8dbca2fb..0000000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM golang:1.8-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -ARG GOOS=linux -ARG GOARCH=amd64 - -RUN set -ex \ - && apk add --no-cache make git - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda400150c..0000000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index 7c6f9c7a6c..0000000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,99 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Go files -GOFILES=$(shell find . -type f -name '*.go') - -# Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -VNDR=$(shell which vndr || echo '') - -${PREFIX}/bin/registry: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-validate: - @echo "+ $@" - $(if $(VNDR), , \ - $(error Please install vndr: go get github.com/lk4d4/vndr)) - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @$(VNDR) - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) - @rm -Rf vendor - @mv .vendor.bak vendor diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index 998878850c..0000000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. 
| -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. - -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC: #docker-distribution on FreeNode
- Issue Tracker: github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a871..0000000000 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afec..0000000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. 
-- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. It was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever-growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. - -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses, and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service? - -#### Extensibility - -The registry should provide extension points to add functionality, keeping -the core scope narrow while still allowing new capabilities to be added. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active.
- -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigorous design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes at the cost of consistency and higher latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID-compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to -decouple search functionality from the registry. This makes the registry -simpler to deploy, especially in use cases where search is not needed, and -lets us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone will attempt to address the problem with the existing -tools, propose it as a standard search API or use it to inform a standardization -process. Once this has been explored, we will integrate with the docker client. - -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much-requested feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deletes seems simple, there are a number -of mitigating factors that make many solutions not ideal or even pathological -in the context of a registry. The following paragraphs discuss the background -and approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal; ideally, both are supported. Generally, according to this rule, we -err on the side of holding data longer than needed, ensuring that it is only -removed when we can be certain that it can be removed. With the current -behavior, we opt to hold onto the data forever, ensuring that data cannot be -incorrectly removed.
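-
-As a concrete illustration of the garbage-collection framing used in the
-rest of this section, here is a minimal sketch (illustrative only; the types
-and names are hypothetical and not part of the distribution codebase) of the
-naive "enumerate manifests, then sweep unreferenced blobs" approach:
-
-```go
-// Illustrative sketch only: a naive mark-and-sweep over an in-memory view
-// of the registry's blob references. It is only correct when no manifests
-// are added while it runs, which is precisely the race described below.
-package main
-
-import "fmt"
-
-type dgst string
-
-type manifest struct {
-	name  string
-	blobs []dgst // blobs referenced by this manifest
-}
-
-// sweep returns the blobs referenced by no manifest.
-func sweep(manifests []manifest, blobs []dgst) []dgst {
-	marked := map[dgst]bool{}
-	for _, m := range manifests {
-		for _, b := range m.blobs {
-			marked[b] = true
-		}
-	}
-	var garbage []dgst
-	for _, b := range blobs {
-		if !marked[b] {
-			garbage = append(garbage, b)
-		}
-	}
-	return garbage
-}
-
-func main() {
-	ms := []manifest{{name: "a", blobs: []dgst{"sha256:01"}}}
-	bs := []dgst{"sha256:01", "sha256:02"}
-	fmt.Println(sweep(ms, bs)) // prints [sha256:02]
-}
-```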
- -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read-after-write consistency, since this is the lowest common denominator -among the storage drivers. This is mitigated by writing values in -reverse-dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we can also try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination.
-- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids the - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments, although it would create a bottleneck - for registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination may offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly affect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. - diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 01d309029f..0000000000 --- a/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,257 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the length of a blob on commit does - // not match the descriptor or is an invalid value.
- ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describes the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotator's canonical algorithm.
- // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. 
- Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. -type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml deleted file mode 100644 index ddc76c86c7..0000000000 --- a/vendor/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,94 +0,0 @@ -# Pony-up! 
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.8 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/lk4d4/vndr: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - git fetch origin: - pwd: $BASE_STABLE - - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae82..0000000000 --- a/vendor/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go deleted file mode 100644 index 71327dca72..0000000000 --- a/vendor/github.com/docker/distribution/digestset/set.go +++ /dev/null @@ -1,247 +0,0 @@ -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. 
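-//
-// For example (illustrative; mirrors the cases exercised in set_test.go):
-// given a set holding sha256:1234511... and sha256:1234611..., Lookup("1234")
-// returns ErrDigestAmbiguous, while Lookup("12345") uniquely resolves the
-// first digest.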
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
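-//
-// For example (illustrative; taken from the expectations in set_test.go):
-// with length 2, a digest such as sha256:5432... that is unique in its first
-// two characters is assigned "54", while near-collisions such as
-// sha256:1234511... and sha256:1234611... are extended to "12345" and
-// "12346".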
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/digestset/set_test.go b/vendor/github.com/docker/distribution/digestset/set_test.go deleted file mode 100644 index 89c5729d00..0000000000 --- a/vendor/github.com/docker/distribution/digestset/set_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package digestset - -import ( - "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "math/rand" - "testing" - - digest "github.com/opencontainers/go-digest" -) - -func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) { - if d1 != d2 { - t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) - } -} - -func TestLookup(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup("54") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[3]) - - dgst, err = dset.Lookup("1234") - if err == nil { - t.Fatal("Expected ambiguous error looking up: 1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("9876") - if err == nil { - t.Fatal("Expected ambiguous error looking up: 9876") - } - if err != ErrDigestNotFound { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:1234") - if err == nil { - t.Fatal("Expected ambiguous error looking up: sha256:1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) - - dgst, err = dset.Lookup("sha256:12346") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12346") - if err != nil { - 
t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) -} - -func TestAddDuplication(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - if len(dset.entries) != 8 { - t.Fatal("Invalid dset size") - } - - if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 8 { - t.Fatal("Duplicate digest insert allowed") - } - - if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 9 { - t.Fatal("Insert with different algorithm not allowed") - } -} - -func TestRemove(t *testing.T) { - digests, err := createDigests(10) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup(digests[0].String()) - if err != nil { - t.Fatal(err) - } - if dgst != digests[0] { - t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) - } - - if err := dset.Remove(digests[0]); err != nil { - t.Fatal(err) - } - - if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { - t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) - } -} - -func TestAll(t *testing.T) { - digests, err := createDigests(100) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - all := map[digest.Digest]struct{}{} - for _, dgst := range dset.All() { - all[dgst] = struct{}{} - } - - if len(all) != len(digests) { - t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) - } - - for i, dgst := range digests { - if _, ok := all[dgst]; !ok { - t.Fatalf("Missing element at position %d: %s", i, dgst) - } - } - -} - -func assertEqualShort(t *testing.T, actual, expected string) { - if actual != expected { - t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) - } -} - -func TestShortCodeTable(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - 
"sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dump := ShortCodeTable(dset, 2) - - if len(dump) < len(digests) { - t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) - } - assertEqualShort(t, dump[digests[0]], "12341") - assertEqualShort(t, dump[digests[1]], "12345") - assertEqualShort(t, dump[digests[2]], "12346") - assertEqualShort(t, dump[digests[3]], "54") - assertEqualShort(t, dump[digests[4]], "6543") - assertEqualShort(t, dump[digests[5]], "64") - assertEqualShort(t, dump[digests[6]], "6542") - assertEqualShort(t, dump[digests[7]], "653") -} - -func createDigests(count int) ([]digest.Digest, error) { - r := rand.New(rand.NewSource(25823)) - digests := make([]digest.Digest, count) - for i := range digests { - h := sha256.New() - if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { - return nil, err - } - digests[i] = digest.NewDigest("sha256", h) - } - return digests, nil -} - -func benchAddNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchLookupNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - shorts := make([]string, 0, n) - for _, short := range ShortCodeTable(dset, shortLen) { - shorts = append(shorts, short) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err = dset.Lookup(shorts[i%n]); err != nil { - b.Fatal(err) - } - } -} - -func benchRemoveNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - b.StopTimer() - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for j := range digests { - if err = dset.Remove(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchShortCodeNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ShortCodeTable(dset, shortLen) - } -} - -func BenchmarkAdd10(b *testing.B) { - benchAddNTable(b, 10) -} - -func BenchmarkAdd100(b *testing.B) { - benchAddNTable(b, 100) -} - -func BenchmarkAdd1000(b *testing.B) { - benchAddNTable(b, 1000) -} - -func BenchmarkRemove10(b *testing.B) { - benchRemoveNTable(b, 10) -} - -func BenchmarkRemove100(b *testing.B) { - benchRemoveNTable(b, 100) -} - -func BenchmarkRemove1000(b *testing.B) { - 
benchRemoveNTable(b, 1000) -} - -func BenchmarkLookup10(b *testing.B) { - benchLookupNTable(b, 10, 12) -} - -func BenchmarkLookup100(b *testing.B) { - benchLookupNTable(b, 100, 12) -} - -func BenchmarkLookup1000(b *testing.B) { - benchLookupNTable(b, 1000, 12) -} - -func BenchmarkShortCode10(b *testing.B) { - benchShortCodeNTable(b, 10, 12) -} -func BenchmarkShortCode100(b *testing.B) { - benchShortCodeNTable(b, 100, 12) -} -func BenchmarkShortCode1000(b *testing.B) { - benchShortCodeNTable(b, 1000, 12) -} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708e..0000000000 --- a/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 020d33258b..0000000000 --- a/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. 
-type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may be set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 2c99f25d39..0000000000 --- a/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "fmt" - "mime" - - "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from this builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists.
- Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7eabb..0000000000 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. 
-func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index 2d71fc5e9f..0000000000 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,170 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". 
-// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // The default tag must be valid; to create a NamedTagged - // type from non-validated input, use the WithTag function - // instead. - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -}
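ParseAnyReference tries three interpretations in order: a bare 64-hex identifier becomes a sha256 digest reference, an algorithm:hex string parses as a digest, and everything else is treated as a (possibly familiar) name. A runnable sketch of that dispatch, assuming the same vendored import path as above:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    for _, s := range []string{
        "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", // bare identifier
        "redis:latest", // familiar name
    } {
        ref, err := reference.ParseAnyReference(s)
        if err != nil {
            panic(err)
        }
        // Prints "sha256:dbcc1c35..." for the identifier and
        // "docker.io/library/redis:latest" for the familiar name.
        fmt.Println(ref.String())
    }
}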
-// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/normalize_test.go b/vendor/github.com/docker/distribution/reference/normalize_test.go deleted file mode 100644 index a881972acc..0000000000 --- a/vendor/github.com/docker/distribution/reference/normalize_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package reference - -import ( - "strconv" - "testing" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // This test case was moved from invalid to valid since it is valid input: - // when specified with a hostname, it removes the ambiguity about - // whether the value is an identifier or repository name - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNormalizedNamed(name) - if err == nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNormalizedNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - // Username doc and image name docker being tested. - "doc/docker", - - // Single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNormalizedNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens).
- "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. - "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNormalizedNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, FamiliarName, FullName, AmbiguousName, Domain string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - FamiliarName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Domain: "docker.io", - }, - { - RemoteName: "library/ubuntu", - FamiliarName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Domain: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - FamiliarName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "other/library", - FamiliarName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - FamiliarName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "privatebase", - FamiliarName: "example.com/privatebase", - FullName: "example.com/privatebase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "privatebasee", - FamiliarName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - FamiliarName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Domain: "docker.io", - }, - { - RemoteName: "library/foo", - FamiliarName: "foo", - FullName: "docker.io/library/foo", - AmbiguousName: "docker.io/foo", - Domain: "docker.io", - }, - { - RemoteName: "library/foo/bar", - FamiliarName: "library/foo/bar", - FullName: "docker.io/library/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "store/foo/bar", - FamiliarName: "store/foo/bar", - FullName: "docker.io/store/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.FamiliarName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, 
err := ParseNormalizedNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.String(); expected != actual { - t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Domain, Domain(r); expected != actual { - t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, Path(r); expected != actual { - t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" - ref, err := ParseNormalizedNamed(shortRef) - if err != nil { - t.Fatal(err) - } - if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } - - if _, isTagged := ref.(NamedTagged); !isTagged { - t.Fatalf("Reference from %q should support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should support digest", ref) - } - if expected, actual := shortRef, FamiliarString(ref); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := ParseNormalizedNamed("-foo"); err == nil { - t.Fatal("Expected ParseNormalizedNamed to detect invalid name") - } - ref, err := ParseNormalizedNamed("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithTag to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithDigest to detect invalid digest") - } -} - -func equalReference(r1, r2 Reference) bool { - switch v1 := r1.(type) { - case digestReference: - if v2, ok := r2.(digestReference); ok { - return v1 == v2 - } - case repository: - if v2, ok := r2.(repository); ok { - return v1 == v2 - } - case taggedReference: - if v2, ok := r2.(taggedReference); ok { - return v1 == v2 - } - case canonicalReference: - if v2, ok := r2.(canonicalReference); ok { - return v1 == v2 - } - case reference: - if v2, ok := r2.(reference); ok { - return v1 == v2 - } - } - return false -} - -func TestParseAnyReference(t *testing.T) { - tcases := []struct { - Reference string - Equivalent string - Expected Reference - Digests []digest.Digest - }{ - { - Reference: "redis", - Equivalent: "docker.io/library/redis", - }, - { - Reference: "redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "docker.io/library/redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference:
"dmcgowan/myapp", - Equivalent: "docker.io/dmcgowan/myapp", - }, - { - Reference: "dmcgowan/myapp:latest", - Equivalent: "docker.io/dmcgowan/myapp:latest", - }, - { - Reference: "docker.io/mcgowan/myapp:latest", - Equivalent: "docker.io/mcgowan/myapp:latest", - }, - { - Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1", - Equivalent: "docker.io/library/dbcc1", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c", - Equivalent: "docker.io/library/dbcc1c", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - } - - for _, tcase := range tcases { - var ref Reference - var err error - if len(tcase.Digests) == 0 { - ref, err = ParseAnyReference(tcase.Reference) - } else { - ds := digestset.NewSet() - for _, dgst := range tcase.Digests { - if err := ds.Add(dgst); err != nil { - t.Fatalf("Error adding digest %s: %v", 
dgst.String(), err) - } - } - ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds) - } - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) - } - if ref.String() != tcase.Equivalent { - t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) - } - - expected := tcase.Expected - if expected == nil { - expected, err = Parse(tcase.Equivalent) - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) - } - } - if !equalReference(ref, expected) { - t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) - } - } -} - -func TestNormalizedSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "docker.io", - name: "test_com/foo", - }, - { - input: "docker/migrator", - domain: "docker.io", - name: "docker/migrator", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "xn--n3h.com/foo", - domain: "xn--n3h.com", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - { - input: "docker.io/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo/bar", - domain: "docker.io", - name: "library/foo/bar", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := ParseNormalizedNamed(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -func TestMatchError(t *testing.T) { - named, err := ParseAnyReference("foo") - if err != nil { - t.Fatal(err) - } - _, err = FamiliarMatch("[-x]", named) - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestMatch(t *testing.T) { - matchCases := []struct { - reference string - pattern string - expected bool - }{ - { - reference: "foo", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/any/bat", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/a/bar", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/b/baz", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/*/baz:tag", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/c/baz:tag", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "*/foo/c/baz", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "example.com/foo/c/baz", - expected: true, - }, - } - for _, c := range matchCases { - named, err := ParseAnyReference(c.reference) - if err != nil { - t.Fatal(err) - } - actual, err := FamiliarMatch(c.pattern, named) - if err != nil { - t.Fatal(err) - } - if actual != c.expected { - t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index 2f66cca87a..0000000000 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. 
- NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// by which it can be referenced -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -}
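Which of these interfaces a parsed value satisfies tells the caller what the original string contained, so consumers type-assert instead of re-parsing. A small sketch, assuming the vendored import path used earlier:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func describe(s string) {
    ref, err := reference.Parse(s)
    if err != nil {
        panic(err)
    }
    _, tagged := ref.(reference.Tagged)
    _, canonical := ref.(reference.Canonical)
    fmt.Printf("%s: tagged=%v canonical=%v\n", s, tagged, canonical)
}

func main() {
    describe("redis:latest") // tagged=true canonical=false
    describe("redis@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912") // tagged=false canonical=true
}

-// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components.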
-type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. 
-func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go deleted file mode 100644 index 16b871f987..0000000000 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ 
/dev/null @@ -1,659 +0,0 @@ -package reference - -import ( - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/json" - "strconv" - "strings" - "testing" - - "github.com/opencontainers/go-digest" -) - -func TestReferenceParse(t *testing.T) { - // referenceTestcases is a unified set of testcases for - // testing the parsing of references - referenceTestcases := []struct { - // input is the repository name or name component testcase - input string - // err is the error expected from Parse, or nil - err error - // repository is the string representation for the reference - repository string - // domain is the domain expected in the reference - domain string - // tag is the tag for the reference - tag string - // digest is the digest for the reference (enforces digest reference) - digest string - }{ - { - input: "test_com", - repository: "test_com", - }, - { - input: "test.com:tag", - repository: "test.com", - tag: "tag", - }, - { - input: "test.com:5000", - repository: "test.com", - tag: "5000", - }, - { - input: "test.com/repo:tag", - domain: "test.com", - repository: "test.com/repo", - tag: "tag", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "test:5000/repo:tag", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - }, - { - input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "repo@sha256:ffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestInvalidLength, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestUnsupported, - }, - { - input: "Uppercase:tag", - err: ErrNameContainsUppercase, - }, - // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. 
- // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 - //{ - // input: "Uppercase/lowercase:tag", - // err: ErrNameContainsUppercase, - //}, - { - input: "test:5000/Uppercase/lowercase:tag", - err: ErrNameContainsUppercase, - }, - { - input: "lowercase:Uppercase", - repository: "lowercase", - tag: "Uppercase", - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - domain: "a", - repository: strings.Repeat("a/", 127) + "a", - tag: "tag-puts-this-over-max", - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - }, - { - input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - tag: "some-long-tag", - }, - { - input: "b.gcr.io/test.example.com/my-app:test.example.com", - domain: "b.gcr.io", - repository: "b.gcr.io/test.example.com/my-app", - tag: "test.example.com", - }, - { - input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - domain: "xn--n3h.com", - repository: "xn--n3h.com/myimage", - tag: "xn--n3h.com", - }, - { - input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - domain: "xn--7o8h.com", - repository: "xn--7o8h.com/myimage", - tag: "xn--7o8h.com", - digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "foo_bar.com:8080", - repository: "foo_bar.com", - tag: "8080", - }, - { - input: "foo/foo_bar.com:8080", - domain: "foo", - repository: "foo/foo_bar.com", - tag: "8080", - }, - } - for _, testcase := range referenceTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - repo, err := Parse(testcase.input) - if testcase.err != nil { - if err == nil { - failf("missing expected error: %v", testcase.err) - } else if testcase.err != err { - failf("mismatched error: got %v, expected %v", err, testcase.err) - } - continue - } else if err != nil { - failf("unexpected parse error: %v", err) - continue - } - if repo.String() != testcase.input { - failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) - } - - if named, ok := repo.(Named); ok { - if named.Name() != testcase.repository { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) - } - domain, _ := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - } else if testcase.repository != "" || testcase.domain != "" { - failf("expected named type, got %T", repo) - } - - tagged, ok := repo.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", repo) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := repo.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", repo) - } - } else if ok { - failf("unexpected digested type") - } - - } -} - -// TestWithNameFailure tests cases where WithName should fail. Cases where it -// should succeed are covered by TestSplitHostname, below. -func TestWithNameFailure(t *testing.T) { - testcases := []struct { - input string - err error - }{ - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - _, err := WithName(testcase.input) - if err == nil { - failf("no error parsing name. expected: %s", testcase.err) - } - } -} - -func TestSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "", - name: "test_com/foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := WithName(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -type serializationType struct { - Description string - Field Field -} - -func TestSerialization(t *testing.T) { - testcases := []struct { - description string - input string - name string - tag string - digest string - err error - }{ - { - description: "empty value", - err: ErrNameEmpty, - }, - { - description: "just a name", - input: "example.com:8000/named", - name: "example.com:8000/named", - }, - { - description: "name with a tag", - input: "example.com:8000/named:tagged", - name: "example.com:8000/named", - tag: "tagged", - }, - { - description: "name with digest", - input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", - name: "other.com/named", - digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - m := map[string]string{ - "Description": testcase.description, - "Field": testcase.input, - } - b, err := json.Marshal(m) - if err != nil { - failf("error marshalling: %v", err) - } - t := serializationType{} - - if err := json.Unmarshal(b, &t); err != nil { - if testcase.err == nil { - failf("error unmarshalling: %v", err) - } - if err != testcase.err { - failf("wrong error, expected %v, got %v", testcase.err, err) - } - - continue - } else if testcase.err != nil { - failf("expected error unmarshalling: %v", testcase.err) - } - - if t.Description != testcase.description { - failf("wrong description, expected %q, got %q", testcase.description, t.Description) - } - - ref := t.Field.Reference() - - if named, ok := ref.(Named); ok { - if named.Name() != testcase.name { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) - } - } else if testcase.name != "" { - failf("expected named type, got %T", ref) - } - - tagged, ok := ref.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", ref) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := ref.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", ref) - } - } else if ok { - failf("unexpected digested type") - } - - t = serializationType{ - Description: testcase.description, - Field: AsField(ref), - } - - b2, err := json.Marshal(t) - if err != nil { - failf("error marshalling serialization type: %v", err) - } - - if string(b) != string(b2) { - failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) - } - - // Ensure t.Field is not implementing "Reference" directly, getting - // around the Reference type system - var fieldInterface interface{} = t.Field - if _, ok := fieldInterface.(Reference); ok { - failf("field should not implement Reference interface") - } - - } -} - -func TestWithTag(t
*testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - tag: "tag", - combined: "test.com/foo:tag", - }, - { - name: "foo", - tag: "tag2", - combined: "foo:tag2", - }, - { - name: "test.com:8000/foo", - tag: "tag4", - combined: "test.com:8000/foo:tag4", - }, - { - name: "test.com:8000/foo", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) - t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - if testcase.digest != "" { - canonical, err := WithDigest(named, testcase.digest) - if err != nil { - failf("error adding digest") - } - named = canonical - } - - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("WithTag failed: %s", err) - } - if tagged.String() != testcase.combined { - failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) - } - } -} - -func TestWithDigest(t *testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "latest", - combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
- t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - if testcase.tag != "" { - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("error adding tag") - } - named = tagged - } - digested, err := WithDigest(named, testcase.digest) - if err != nil { - failf("WithDigest failed: %s", err) - } - if digested.String() != testcase.combined { - failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) - } - } -} - -func TestParseNamed(t *testing.T) { - testcases := []struct { - input string - domain string - name string - err error - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test_com/foo", - err: ErrNameNotCanonical, - }, - { - input: "test.com", - err: ErrNameNotCanonical, - }, - { - input: "foo", - err: ErrNameNotCanonical, - }, - { - input: "library/foo", - err: ErrNameNotCanonical, - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - // Ambiguous case, parser will add "library/" to foo - { - input: "docker.io/foo", - err: ErrNameNotCanonical, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - named, err := ParseNamed(testcase.input) - if err != nil && testcase.err == nil { - failf("error parsing name: %s", err) - continue - } else if err == nil && testcase.err != nil { - failf("parsing succeeded: expected error %v", testcase.err) - continue - } else if err != testcase.err { - failf("unexpected error %v, expected %v", err, testcase.err) - continue - } else if err != nil { - continue - } - - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 7860349320..0000000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscores, and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscores, and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names.
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. 
-func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go deleted file mode 100644 index 09bc819275..0000000000 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ /dev/null @@ -1,553 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - "testing" -) - -type regexpMatch struct { - input string - match bool - subs []string -} - -func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { - matches := r.FindStringSubmatch(m.input) - if m.match && matches != nil { - if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { - t.Fatalf("Bad match result %#v for %q", matches, m.input) - } - if len(matches) < (len(m.subs) + 1) { - t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) - } - for i := range m.subs { - if m.subs[i] != matches[i+1] { - t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) - } - } - } else if m.match { - t.Errorf("Expected match for %q", m.input) - } else if matches != nil { - t.Errorf("Unexpected match for %q", m.input) - } -} - -func TestDomainRegexp(t *testing.T) { - hostcases := []regexpMatch{ - { - input: "test.com", - match: true, - }, - { - input: "test.com:10304", - match: true, - }, - { - input: "test.com:http", - match: false, - }, - { - input: "localhost", - match: true, - }, - { - input: "localhost:8080", - match: true, - }, - { - input: "a", - match: true, - }, - { - input: "a.b", - match: true, - }, - { - input: "ab.cd.com", - match: true, - }, - { - input: "a-b.com", - match: true, - }, - { - input: "-ab.com", - match: false, - }, - { - input: "ab-.com", - match: false, - }, - { - input: "ab.c-om", - match: true, - }, - { - input: "ab.-com", - match: false, - }, - { - input: "ab.com-", - match: false, - }, - { - input: "0101.com", - match: true, // TODO(dmcgowan): valid if this should be allowed - }, - { - input: "001a.com", - match: true, - }, - { - input: "b.gbc.io:443", - match: true, - }, - { - input: "b.gbc.io", - match: true, - }, - { - input: "xn--n3h.com", // ☃.com in punycode - match: true, - }, - { - input: "Asdf.com", // uppercase character - match: true, - }, - } - r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) - for i := range hostcases { - checkRegexp(t, r, hostcases[i]) - } -} - -func TestFullNameRegexp(t *testing.T) { - if anchoredNameRegexp.NumSubexp() != 2 { - t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", - anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "", - match: false, - }, 
- { - input: "short", - match: true, - subs: []string{"", "short"}, - }, - { - input: "simple/name", - match: true, - subs: []string{"simple", "name"}, - }, - { - input: "library/ubuntu", - match: true, - subs: []string{"library", "ubuntu"}, - }, - { - input: "docker/stevvooe/app", - match: true, - subs: []string{"docker", "stevvooe/app"}, - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, - }, - { - input: "aa/aa/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/bb/bb/bb"}, - }, - { - input: "a/a/a/a", - match: true, - subs: []string{"a", "a/a/a"}, - }, - { - input: "a/a/a/a/", - match: false, - }, - { - input: "a//a/a", - match: false, - }, - { - input: "a", - match: true, - subs: []string{"", "a"}, - }, - { - input: "a/aa", - match: true, - subs: []string{"a", "aa"}, - }, - { - input: "a/aa/a", - match: true, - subs: []string{"a", "aa/a"}, - }, - { - input: "foo.com", - match: true, - subs: []string{"", "foo.com"}, - }, - { - input: "foo.com/", - match: false, - }, - { - input: "foo.com:8080/bar", - match: true, - subs: []string{"foo.com:8080", "bar"}, - }, - { - input: "foo.com:http/bar", - match: false, - }, - { - input: "foo.com/bar", - match: true, - subs: []string{"foo.com", "bar"}, - }, - { - input: "foo.com/bar/baz", - match: true, - subs: []string{"foo.com", "bar/baz"}, - }, - { - input: "localhost:8080/bar", - match: true, - subs: []string{"localhost:8080", "bar"}, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - match: true, - subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, - }, - { - input: "blog.foo.com/bar/baz", - match: true, - subs: []string{"blog.foo.com", "bar/baz"}, - }, - { - input: "a^a", - match: false, - }, - { - input: "aa/asdf$$^/aa", - match: false, - }, - { - input: "asdf$$^/aa", - match: false, - }, - { - input: "aa-a/a", - match: true, - subs: []string{"aa-a", "a"}, - }, - { - input: strings.Repeat("a/", 128) + "a", - match: true, - subs: []string{"a", strings.Repeat("a/", 127) + "a"}, - }, - { - input: "a-/a/a/a", - match: false, - }, - { - input: "foo.com/a-/a/a", - match: false, - }, - { - input: "-foo/bar", - match: false, - }, - { - input: "foo/bar-", - match: false, - }, - { - input: "foo-/bar", - match: false, - }, - { - input: "foo/-bar", - match: false, - }, - { - input: "_foo/bar", - match: false, - }, - { - input: "foo_bar", - match: true, - subs: []string{"", "foo_bar"}, - }, - { - input: "foo_bar.com", - match: true, - subs: []string{"", "foo_bar.com"}, - }, - { - input: "foo_bar.com:8080", - match: false, - }, - { - input: "foo_bar.com:8080/app", - match: false, - }, - { - input: "foo.com/foo_bar", - match: true, - subs: []string{"foo.com", "foo_bar"}, - }, - { - input: "____/____", - match: false, - }, - { - input: "_docker/_docker", - match: false, - }, - { - input: "docker_/docker_", - match: false, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "xn--n3h.com/myimage", // ☃.com in punycode - match: true, - subs: []string{"xn--n3h.com", "myimage"}, - }, - { - input: "xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"xn--7o8h.com", "myimage"}, - }, - { - input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"example.com", "xn--7o8h.com/myimage"}, - }, - { - input: "example.com/some_separator__underscore/myimage", - match: true, - subs: []string{"example.com", 
"some_separator__underscore/myimage"}, - }, - { - input: "example.com/__underscore/myimage", - match: false, - }, - { - input: "example.com/..dots/myimage", - match: false, - }, - { - input: "example.com/.dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "docker./docker", - match: false, - }, - { - input: ".docker/docker", - match: false, - }, - { - input: "docker-/docker", - match: false, - }, - { - input: "-docker/docker", - match: false, - }, - { - input: "do..cker/docker", - match: false, - }, - { - input: "do__cker:8080/docker", - match: false, - }, - { - input: "do__cker/docker", - match: true, - subs: []string{"", "do__cker/docker"}, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "registry.io/foo/project--id.module--name.ver---sion--name", - match: true, - subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, - }, - { - input: "Asdf.com/foo/bar", // uppercase character in hostname - match: true, - }, - { - input: "Foo/FarB", // uppercase characters in remote name - match: false, - }, - } - for i := range testcases { - checkRegexp(t, anchoredNameRegexp, testcases[i]) - } -} - -func TestReferenceRegexp(t *testing.T) { - if ReferenceRegexp.NumSubexp() != 3 { - t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", - ReferenceRegexp, ReferenceRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "registry.com:8080/myapp:tag", - match: true, - subs: []string{"registry.com:8080/myapp", "tag", ""}, - }, - { - input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@sha256:badbadbadbad", - match: false, - }, - { - input: "registry.com:8080/myapp:invalid~tag", - match: false, - }, - { - input: "bad_hostname.com:8080/myapp:tag", - match: false, - }, - { - input:// localhost treated as name, missing tag with 8080 as tag - "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: false, - }, - { - // localhost will be treated as an image name without a host - input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@bad", - match: false, - }, - { - input: "registry.com:8080/myapp@2bad", - 
match: false, // TODO(dmcgowan): Support this as valid - }, - } - - for i := range testcases { - checkRegexp(t, ReferenceRegexp, testcases[i]) - } - -} - -func TestIdentifierRegexp(t *testing.T) { - fullCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: false, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - } - - shortCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: true, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - { - input: "da304", - match: false, - }, - { - input: "da304e", - match: true, - }, - } - - for i := range fullCases { - checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) - } - - for i := range shortCases { - checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) - } -} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index 1da1d533ff..0000000000 --- a/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,97 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. 
- Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index 5030565963..0000000000 --- a/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. 
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index d67edd779e..0000000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,43 +0,0 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git 
-github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000000..9ea86d784e
--- /dev/null
+++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
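The go-units sources vendored in the hunks below (duration.go, size.go, ulimit.go) are small, dependency-free helpers for converting between human-readable and machine values. As a quick orientation, here is a minimal sketch — not part of the vendored files — exercising the exported API. It assumes the library is imported by its canonical path github.com/docker/go-units (mirrored here under the github.com vendor tree), and the expected outputs in the comments are read off the implementations that follow:

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// duration.go: coarse, human-readable durations.
	fmt.Println(units.HumanDuration(47 * time.Second)) // "47 seconds"
	fmt.Println(units.HumanDuration(time.Hour))        // "About an hour"

	// size.go: decimal (SI, base-1000) formatting vs binary (base-1024).
	fmt.Println(units.HumanSize(2746000))  // "2.746MB"
	fmt.Println(units.BytesSize(17825792)) // "17MiB"

	// size.go parsing: RAMInBytes uses binary units, case-insensitively,
	// with an optional "b"/"ib" suffix ("64m" == "64MiB").
	if n, err := units.RAMInBytes("64m"); err == nil {
		fmt.Println(n) // 67108864
	}

	// ulimit.go: parses "name=soft[:hard]" strings; hard defaults to soft.
	if u, err := units.ParseUlimit("nofile=1024:2048"); err == nil {
		fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048
	}
}
```

Note the split in the package: HumanSize/FromHumanSize work in powers of 1000 (kB, MB, ...), while BytesSize/RAMInBytes work in powers of 1024 (KiB, MiB, ...), which is why the same byte count formats differently through the two paths.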
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000000..9b3b6b101e --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 0000000000..4f70a4e134 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000000..9043b35478 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 0000000000..ba02af26dc --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 46 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 0000000000..85f6ab0715 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. 
+// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000000..5ac7fd825f --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go index b59fa5fdcd..d4ee7be815 100644 --- a/vendor/github.com/docker/spdystream/handlers.go +++ b/vendor/github.com/docker/spdystream/handlers.go @@ -30,9 +30,7 @@ func MirrorStreamHandler(stream *Stream) { }() } -// NoopStreamHandler does nothing when stream connects, most -// likely used with RejectAuthHandler which will not allow any -// streams to make it to the stream handler. +// NoopStreamHandler does nothing when stream connects. func NoOpStreamHandler(stream *Stream) { stream.SendReply(http.Header{}, false) } diff --git a/vendor/github.com/docker/spdystream/priority_test.go b/vendor/github.com/docker/spdystream/priority_test.go deleted file mode 100644 index 0746be2a80..0000000000 --- a/vendor/github.com/docker/spdystream/priority_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package spdystream - -import ( - "sync" - "testing" - "time" - - "github.com/docker/spdystream/spdy" -) - -func TestPriorityQueueOrdering(t *testing.T) { - queue := NewPriorityFrameQueue(150) - data1 := &spdy.DataFrame{} - data2 := &spdy.DataFrame{} - data3 := &spdy.DataFrame{} - data4 := &spdy.DataFrame{} - queue.Push(data1, 2) - queue.Push(data2, 1) - queue.Push(data3, 1) - queue.Push(data4, 0) - - if queue.Pop() != data4 { - t.Fatalf("Wrong order, expected data4 first") - } - if queue.Pop() != data2 { - t.Fatalf("Wrong order, expected data2 second") - } - if queue.Pop() != data3 { - t.Fatalf("Wrong order, expected data3 third") - } - if queue.Pop() != data1 { - t.Fatalf("Wrong order, expected data1 fourth") - } - - // Insert 50 Medium priority frames - for i := spdy.StreamId(50); i < 100; i++ { - queue.Push(&spdy.DataFrame{StreamId: i}, 1) - } - // Insert 50 low priority frames - for i := spdy.StreamId(100); i < 150; i++ { - queue.Push(&spdy.DataFrame{StreamId: i}, 2) - } - // Insert 50 high priority frames - for i := spdy.StreamId(0); i < 50; i++ { - queue.Push(&spdy.DataFrame{StreamId: i}, 0) - } - - for i := spdy.StreamId(0); i < 150; i++ { - frame := queue.Pop() - if frame.(*spdy.DataFrame).StreamId != i { - t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) - } - } -} - -func TestPriorityQueueSync(t *testing.T) { - queue := NewPriorityFrameQueue(150) - var wg sync.WaitGroup - insertRange := func(start, stop spdy.StreamId, priority uint8) { - for i := start; i < stop; i++ { - queue.Push(&spdy.DataFrame{StreamId: i}, priority) - } - wg.Done() - } - wg.Add(3) - go insertRange(spdy.StreamId(100), spdy.StreamId(150), 2) - go insertRange(spdy.StreamId(0), spdy.StreamId(50), 0) - go insertRange(spdy.StreamId(50), spdy.StreamId(100), 1) - - wg.Wait() - for i := spdy.StreamId(0); i < 150; i++ { - frame := queue.Pop() - if frame.(*spdy.DataFrame).StreamId != i { - t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) - } - } -} - -func TestPriorityQueueBlocking(t *testing.T) { - queue := NewPriorityFrameQueue(15) - for i := 0; i < 15; i++ { - queue.Push(&spdy.DataFrame{}, 2) - } - doneChan := make(chan bool) - go func() { - 
queue.Push(&spdy.DataFrame{}, 2) - close(doneChan) - }() - select { - case <-doneChan: - t.Fatalf("Push succeeded, expected to block") - case <-time.After(time.Millisecond): - break - } - - queue.Pop() - - select { - case <-doneChan: - break - case <-time.After(time.Millisecond): - t.Fatalf("Push should have succeeded, but timeout reached") - } - - for i := 0; i < 15; i++ { - queue.Pop() - } -} diff --git a/vendor/github.com/docker/spdystream/spdy/spdy_test.go b/vendor/github.com/docker/spdystream/spdy/spdy_test.go deleted file mode 100644 index ce581f1d05..0000000000 --- a/vendor/github.com/docker/spdystream/spdy/spdy_test.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package spdy - -import ( - "bytes" - "compress/zlib" - "encoding/base64" - "io" - "io/ioutil" - "net/http" - "reflect" - "testing" -) - -var HeadersFixture = http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, -} - -func TestHeaderParsing(t *testing.T) { - var headerValueBlockBuf bytes.Buffer - writeHeaderValueBlock(&headerValueBlockBuf, HeadersFixture) - const bogusStreamId = 1 - newHeaders, err := parseHeaderValueBlock(&headerValueBlockBuf, bogusStreamId) - if err != nil { - t.Fatal("parseHeaderValueBlock:", err) - } - if !reflect.DeepEqual(HeadersFixture, newHeaders) { - t.Fatal("got: ", newHeaders, "\nwant: ", HeadersFixture) - } -} - -func TestCreateParseSynStreamFrameCompressionDisable(t *testing.T) { - buffer := new(bytes.Buffer) - // Fixture framer for no compression test. - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, - } - synStreamFrame := SynStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynStream, - }, - StreamId: 2, - Headers: HeadersFixture, - } - if err := framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestCreateParseSynStreamFrameCompressionEnable(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - synStreamFrame := SynStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynStream, - }, - StreamId: 2, - Headers: HeadersFixture, - } - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - if err := framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestCreateParseSynReplyFrameCompressionDisable(t *testing.T) { - buffer := new(bytes.Buffer) - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, 
- } - synReplyFrame := SynReplyFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynReply, - }, - StreamId: 2, - Headers: HeadersFixture, - } - if err := framer.WriteFrame(&synReplyFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedSynReplyFrame, ok := frame.(*SynReplyFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { - t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) - } -} - -func TestCreateParseSynReplyFrameCompressionEnable(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - synReplyFrame := SynReplyFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynReply, - }, - StreamId: 2, - Headers: HeadersFixture, - } - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - if err := framer.WriteFrame(&synReplyFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedSynReplyFrame, ok := frame.(*SynReplyFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { - t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) - } -} - -func TestCreateParseRstStream(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - rstStreamFrame := RstStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeRstStream, - }, - StreamId: 1, - Status: InvalidStream, - } - if err := framer.WriteFrame(&rstStreamFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedRstStreamFrame, ok := frame.(*RstStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(rstStreamFrame, *parsedRstStreamFrame) { - t.Fatal("got: ", *parsedRstStreamFrame, "\nwant: ", rstStreamFrame) - } -} - -func TestCreateParseSettings(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - settingsFrame := SettingsFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSettings, - }, - FlagIdValues: []SettingsFlagIdValue{ - {FlagSettingsPersistValue, SettingsCurrentCwnd, 10}, - {FlagSettingsPersisted, SettingsUploadBandwidth, 1}, - }, - } - if err := framer.WriteFrame(&settingsFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedSettingsFrame, ok := frame.(*SettingsFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(settingsFrame, *parsedSettingsFrame) { - t.Fatal("got: ", *parsedSettingsFrame, "\nwant: ", settingsFrame) - } -} - -func TestCreateParsePing(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - pingFrame := PingFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypePing, - }, - Id: 31337, - } - if err := framer.WriteFrame(&pingFrame); err != nil { - 
t.Fatal("WriteFrame:", err) - } - if pingFrame.CFHeader.Flags != 0 { - t.Fatal("Incorrect frame type:", pingFrame) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedPingFrame, ok := frame.(*PingFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if parsedPingFrame.CFHeader.Flags != 0 { - t.Fatal("Parsed incorrect frame type:", parsedPingFrame) - } - if !reflect.DeepEqual(pingFrame, *parsedPingFrame) { - t.Fatal("got: ", *parsedPingFrame, "\nwant: ", pingFrame) - } -} - -func TestCreateParseGoAway(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - goAwayFrame := GoAwayFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeGoAway, - }, - LastGoodStreamId: 31337, - Status: 1, - } - if err := framer.WriteFrame(&goAwayFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - if goAwayFrame.CFHeader.Flags != 0 { - t.Fatal("Incorrect frame type:", goAwayFrame) - } - if goAwayFrame.CFHeader.length != 8 { - t.Fatal("Incorrect frame type:", goAwayFrame) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedGoAwayFrame, ok := frame.(*GoAwayFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if parsedGoAwayFrame.CFHeader.Flags != 0 { - t.Fatal("Incorrect frame type:", parsedGoAwayFrame) - } - if parsedGoAwayFrame.CFHeader.length != 8 { - t.Fatal("Incorrect frame type:", parsedGoAwayFrame) - } - if !reflect.DeepEqual(goAwayFrame, *parsedGoAwayFrame) { - t.Fatal("got: ", *parsedGoAwayFrame, "\nwant: ", goAwayFrame) - } -} - -func TestCreateParseHeadersFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, - } - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - StreamId: 2, - } - headersFrame.Headers = HeadersFixture - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } -} - -func TestCreateParseHeadersFrameCompressionEnable(t *testing.T) { - buffer := new(bytes.Buffer) - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - StreamId: 2, - } - headersFrame.Headers = HeadersFixture - - framer, err := NewFramer(buffer, buffer) - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } -} - -func TestCreateParseWindowUpdateFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - windowUpdateFrame := 
WindowUpdateFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeWindowUpdate, - }, - StreamId: 31337, - DeltaWindowSize: 1, - } - if err := framer.WriteFrame(&windowUpdateFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - if windowUpdateFrame.CFHeader.Flags != 0 { - t.Fatal("Incorrect frame type:", windowUpdateFrame) - } - if windowUpdateFrame.CFHeader.length != 8 { - t.Fatal("Incorrect frame type:", windowUpdateFrame) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedWindowUpdateFrame, ok := frame.(*WindowUpdateFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if parsedWindowUpdateFrame.CFHeader.Flags != 0 { - t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame) - } - if parsedWindowUpdateFrame.CFHeader.length != 8 { - t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame) - } - if !reflect.DeepEqual(windowUpdateFrame, *parsedWindowUpdateFrame) { - t.Fatal("got: ", *parsedWindowUpdateFrame, "\nwant: ", windowUpdateFrame) - } -} - -func TestCreateParseDataFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - dataFrame := DataFrame{ - StreamId: 1, - Data: []byte{'h', 'e', 'l', 'l', 'o'}, - } - if err := framer.WriteFrame(&dataFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedDataFrame, ok := frame.(*DataFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(dataFrame, *parsedDataFrame) { - t.Fatal("got: ", *parsedDataFrame, "\nwant: ", dataFrame) - } -} - -func TestCompressionContextAcrossFrames(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - StreamId: 2, - Headers: HeadersFixture, - } - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame (HEADERS):", err) - } - synStreamFrame := SynStreamFrame{ - ControlFrameHeader{ - Version, - TypeSynStream, - 0, // Flags - 0, // length - }, - 2, // StreamId - 0, // AssociatedTOStreamID - 0, // Priority - 1, // Slot - nil, // Headers - } - synStreamFrame.Headers = HeadersFixture - - if err := framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame (SYN_STREAM):", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (HEADERS):", err, buffer.Bytes()) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatalf("expected HeadersFrame; got %T %v", frame, frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } - frame, err = framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (SYN_STREAM):", err, buffer.Bytes()) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatalf("expected SynStreamFrame; got %T %v", frame, frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestMultipleSPDYFrames(t *testing.T) { - // Initialize the framers. 
- pr1, pw1 := io.Pipe() - pr2, pw2 := io.Pipe() - writer, err := NewFramer(pw1, pr2) - if err != nil { - t.Fatal("Failed to create writer:", err) - } - reader, err := NewFramer(pw2, pr1) - if err != nil { - t.Fatal("Failed to create reader:", err) - } - - // Set up the frames we're actually transferring. - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - StreamId: 2, - Headers: HeadersFixture, - } - synStreamFrame := SynStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynStream, - }, - StreamId: 2, - Headers: HeadersFixture, - } - - // Start the goroutines to write the frames. - go func() { - if err := writer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame (HEADERS): ", err) - } - if err := writer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame (SYN_STREAM): ", err) - } - }() - - // Read the frames and verify they look as expected. - frame, err := reader.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (HEADERS): ", err) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } - frame, err = reader.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (SYN_STREAM):", err) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type.") - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestReadMalformedZlibHeader(t *testing.T) { - // These were constructed by corrupting the first byte of the zlib - // header after writing. - malformedStructs := map[string]string{ - "SynStreamFrame": "gAIAAQAAABgAAAACAAAAAAAAF/nfolGyYmAAAAAA//8=", - "SynReplyFrame": "gAIAAgAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", - "HeadersFrame": "gAIACAAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", - } - for name, bad := range malformedStructs { - b, err := base64.StdEncoding.DecodeString(bad) - if err != nil { - t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err) - } - buf := bytes.NewBuffer(b) - reader, err := NewFramer(buf, buf) - if err != nil { - t.Fatalf("NewFramer: %v", err) - } - _, err = reader.ReadFrame() - if err != zlib.ErrHeader { - t.Errorf("Frame %s, expected: %#v, actual: %#v", name, zlib.ErrHeader, err) - } - } -} - -// TODO: these tests are too weak for updating SPDY spec. Fix me. 
- -type zeroStream struct { - frame Frame - encoded string -} - -var streamIdZeroFrames = map[string]zeroStream{ - "SynStreamFrame": { - &SynStreamFrame{StreamId: 0}, - "gAIAAQAAABgAAAAAAAAAAAAAePnfolGyYmAAAAAA//8=", - }, - "SynReplyFrame": { - &SynReplyFrame{StreamId: 0}, - "gAIAAgAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", - }, - "RstStreamFrame": { - &RstStreamFrame{StreamId: 0}, - "gAIAAwAAAAgAAAAAAAAAAA==", - }, - "HeadersFrame": { - &HeadersFrame{StreamId: 0}, - "gAIACAAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", - }, - "DataFrame": { - &DataFrame{StreamId: 0}, - "AAAAAAAAAAA=", - }, - "PingFrame": { - &PingFrame{Id: 0}, - "gAIABgAAAAQAAAAA", - }, -} - -func TestNoZeroStreamId(t *testing.T) { - t.Skip("TODO: update to work with SPDY3") - - for name, f := range streamIdZeroFrames { - b, err := base64.StdEncoding.DecodeString(f.encoded) - if err != nil { - t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err) - continue - } - framer, err := NewFramer(ioutil.Discard, bytes.NewReader(b)) - if err != nil { - t.Fatalf("NewFramer: %v", err) - } - err = framer.WriteFrame(f.frame) - checkZeroStreamId(t, name, "WriteFrame", err) - - _, err = framer.ReadFrame() - checkZeroStreamId(t, name, "ReadFrame", err) - } -} - -func checkZeroStreamId(t *testing.T, frame string, method string, err error) { - if err == nil { - t.Errorf("%s ZeroStreamId, no error on %s", method, frame) - return - } - eerr, ok := err.(*Error) - if !ok || eerr.Err != ZeroStreamId { - t.Errorf("%s ZeroStreamId, incorrect error %#v, frame %s", method, eerr, frame) - } -} diff --git a/vendor/github.com/docker/spdystream/spdy_bench_test.go b/vendor/github.com/docker/spdystream/spdy_bench_test.go deleted file mode 100644 index 6f9e491015..0000000000 --- a/vendor/github.com/docker/spdystream/spdy_bench_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package spdystream - -import ( - "fmt" - "io" - "net" - "net/http" - "sync" - "testing" -) - -func configureServer() (io.Closer, string, *sync.WaitGroup) { - authenticated = true - wg := &sync.WaitGroup{} - server, listen, serverErr := runServer(wg) - - if serverErr != nil { - panic(serverErr) - } - - return server, listen, wg -} - -func BenchmarkDial10000(b *testing.B) { - server, addr, wg := configureServer() - - defer func() { - server.Close() - wg.Wait() - }() - - for i := 0; i < b.N; i++ { - conn, dialErr := net.Dial("tcp", addr) - if dialErr != nil { - panic(fmt.Sprintf("Error dialing server: %s", dialErr)) - } - conn.Close() - } -} - -func BenchmarkDialWithSPDYStream10000(b *testing.B) { - server, addr, wg := configureServer() - - defer func() { - server.Close() - wg.Wait() - }() - - for i := 0; i < b.N; i++ { - conn, dialErr := net.Dial("tcp", addr) - if dialErr != nil { - b.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - b.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - closeErr := spdyConn.Close() - if closeErr != nil { - b.Fatalf("Error closing connection: %s", closeErr) - } - } -} - -func benchmarkStreamWithDataAndSize(size uint64, b *testing.B) { - server, addr, wg := configureServer() - - defer func() { - server.Close() - wg.Wait() - }() - - for i := 0; i < b.N; i++ { - conn, dialErr := net.Dial("tcp", addr) - if dialErr != nil { - b.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - b.Fatalf("Error creating spdy connection: %s", spdyErr) - } 
- - go spdyConn.Serve(MirrorStreamHandler) - - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - panic(err) - } - - writer := make([]byte, size) - - stream.Write(writer) - - reader := make([]byte, size) - stream.Read(reader) - - stream.Close() - - closeErr := spdyConn.Close() - if closeErr != nil { - b.Fatalf("Error closing connection: %s", closeErr) - } - } -} - -func BenchmarkStreamWith1Byte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1, b) } -func BenchmarkStreamWith1KiloByte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024, b) } -func BenchmarkStreamWith1Megabyte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024*1024, b) } diff --git a/vendor/github.com/docker/spdystream/spdy_test.go b/vendor/github.com/docker/spdystream/spdy_test.go deleted file mode 100644 index 7f0e3a4487..0000000000 --- a/vendor/github.com/docker/spdystream/spdy_test.go +++ /dev/null @@ -1,1171 +0,0 @@ -package spdystream - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "github.com/docker/spdystream/spdy" -) - -func TestSpdyStreams(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if streamErr != nil { - t.Fatalf("Error creating stream: %s", streamErr) - } - - waitErr := stream.Wait() - if waitErr != nil { - t.Fatalf("Error waiting for stream: %s", waitErr) - } - - message := []byte("hello") - writeErr := stream.WriteData(message, false) - if writeErr != nil { - t.Fatalf("Error writing data") - } - - buf := make([]byte, 10) - n, readErr := stream.Read(buf) - if readErr != nil { - t.Fatalf("Error reading data from stream: %s", readErr) - } - if n != 5 { - t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n) - } - if bytes.Compare(buf[:n], message) != 0 { - t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", buf[:n], message) - } - - headers := http.Header{ - "TestKey": []string{"TestVal"}, - } - sendErr := stream.SendHeader(headers, false) - if sendErr != nil { - t.Fatalf("Error sending headers: %s", sendErr) - } - receiveHeaders, receiveErr := stream.ReceiveHeader() - if receiveErr != nil { - t.Fatalf("Error receiving headers: %s", receiveErr) - } - if len(receiveHeaders) != 1 { - t.Fatalf("Unexpected number of headers:\nActual: %d\nExpecting: %d", len(receiveHeaders), 1) - } - testVal := receiveHeaders.Get("TestKey") - if testVal != "TestVal" { - t.Fatalf("Wrong test value:\nActual: %q\nExpecting: %q", testVal, "TestVal") - } - - writeErr = stream.WriteData(message, true) - if writeErr != nil { - t.Fatalf("Error writing data") - } - - smallBuf := make([]byte, 3) - n, readErr = stream.Read(smallBuf) - if readErr != nil { - t.Fatalf("Error reading data from stream: %s", readErr) - } - if n != 3 { - t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n) - } - if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 { - t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", 
smallBuf[:n], "hel") - } - n, readErr = stream.Read(smallBuf) - if readErr != nil { - t.Fatalf("Error reading data from stream: %s", readErr) - } - if n != 2 { - t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n) - } - if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 { - t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n]) - } - - n, readErr = stream.Read(buf) - if readErr != io.EOF { - t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n) - } - - // Closing again should return error since stream is already closed - streamCloseErr := stream.Close() - if streamCloseErr == nil { - t.Fatalf("No error closing finished stream") - } - if streamCloseErr != ErrWriteClosedStream { - t.Fatalf("Unexpected error closing stream: %s", streamCloseErr) - } - - streamResetErr := stream.Reset() - if streamResetErr != nil { - t.Fatalf("Error resetting stream: %s", streamResetErr) - } - - authenticated = false - badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if badStreamErr != nil { - t.Fatalf("Error creating stream: %s", badStreamErr) - } - - waitErr = badStream.Wait() - if waitErr == nil { - t.Fatalf("Did not receive error creating stream") - } - if waitErr != ErrReset { - t.Fatalf("Unexpected error creating stream: %s", waitErr) - } - streamCloseErr = badStream.Close() - if streamCloseErr == nil { - t.Fatalf("No error closing bad stream") - } - - spdyCloseErr := spdyConn.Close() - if spdyCloseErr != nil { - t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestPing(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - pingTime, pingErr := spdyConn.Ping() - if pingErr != nil { - t.Fatalf("Error pinging server: %s", pingErr) - } - if pingTime == time.Duration(0) { - t.Fatalf("Expecting non-zero ping time") - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestHalfClose(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if streamErr != nil { - t.Fatalf("Error creating stream: %s", streamErr) - } - - waitErr := stream.Wait() - if waitErr != nil { - t.Fatalf("Error waiting for stream: %s", waitErr) - } - - message := []byte("hello and will read after close") - writeErr := stream.WriteData(message, false) - if writeErr != nil { - t.Fatalf("Error writing data") - } - - streamCloseErr := stream.Close() - if streamCloseErr != nil { - t.Fatalf("Error closing stream: %s", 
streamCloseErr) - } - - buf := make([]byte, 40) - n, readErr := stream.Read(buf) - if readErr != nil { - t.Fatalf("Error reading data from stream: %s", readErr) - } - if n != 31 { - t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 31", n) - } - if bytes.Compare(buf[:n], message) != 0 { - t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", buf[:n], message) - } - - spdyCloseErr := spdyConn.Close() - if spdyCloseErr != nil { - t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestUnexpectedRemoteConnectionClosed(t *testing.T) { - tt := []struct { - closeReceiver bool - closeSender bool - }{ - {closeReceiver: true, closeSender: false}, - {closeReceiver: false, closeSender: true}, - {closeReceiver: false, closeSender: false}, - } - for tix, tc := range tt { - listener, listenErr := net.Listen("tcp", "localhost:0") - if listenErr != nil { - t.Fatalf("Error listening: %v", listenErr) - } - - var serverConn net.Conn - var connErr error - go func() { - serverConn, connErr = listener.Accept() - if connErr != nil { - t.Fatalf("Error accepting: %v", connErr) - } - - serverSpdyConn, _ := NewConnection(serverConn, true) - go serverSpdyConn.Serve(func(stream *Stream) { - stream.SendReply(http.Header{}, tc.closeSender) - }) - }() - - conn, dialErr := net.Dial("tcp", listener.Addr().String()) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if streamErr != nil { - t.Fatalf("Error creating stream: %s", streamErr) - } - - waitErr := stream.Wait() - if waitErr != nil { - t.Fatalf("Error waiting for stream: %s", waitErr) - } - - if tc.closeReceiver { - // make stream half closed, receive only - stream.Close() - } - - streamch := make(chan error, 1) - go func() { - b := make([]byte, 1) - _, err := stream.Read(b) - streamch <- err - }() - - closeErr := serverConn.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - - select { - case e := <-streamch: - if e != io.EOF { - t.Fatalf("(%d) Expected to get an EOF stream error", tix) - } - } - - closeErr = conn.Close() - if closeErr != nil { - t.Fatalf("Error closing client connection: %s", closeErr) - } - - listenErr = listener.Close() - if listenErr != nil { - t.Fatalf("Error closing listener: %s", listenErr) - } - } -} - -func TestCloseNotification(t *testing.T) { - listener, listenErr := net.Listen("tcp", "localhost:0") - if listenErr != nil { - t.Fatalf("Error listening: %v", listenErr) - } - listen := listener.Addr().String() - - serverConnChan := make(chan net.Conn) - go func() { - serverConn, err := listener.Accept() - if err != nil { - t.Fatalf("Error accepting: %v", err) - } - - serverSpdyConn, err := NewConnection(serverConn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go serverSpdyConn.Serve(NoOpStreamHandler) - <-serverSpdyConn.CloseChan() - serverConnChan <- serverConn - }() - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating 
spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - // close client conn - err := conn.Close() - if err != nil { - t.Fatalf("Error closing client connection: %v", err) - } - - var serverConn net.Conn - select { - case serverConn = <-serverConnChan: - } - - err = serverConn.Close() - if err != nil { - t.Fatalf("Error closing serverConn: %v", err) - } - - listenErr = listener.Close() - if listenErr != nil { - t.Fatalf("Error closing listener: %s", listenErr) - } -} - -func TestIdleShutdownRace(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - - spdyConn.SetIdleTimeout(5 * time.Millisecond) - go func() { - time.Sleep(5 * time.Millisecond) - stream.Reset() - }() - - select { - case <-spdyConn.CloseChan(): - case <-time.After(20 * time.Millisecond): - t.Fatal("Timed out waiting for idle connection closure") - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestIdleNoTimeoutSet(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - select { - case <-spdyConn.CloseChan(): - t.Fatal("Unexpected connection closure") - case <-time.After(10 * time.Millisecond): - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestIdleClearTimeout(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - spdyConn.SetIdleTimeout(10 * time.Millisecond) - spdyConn.SetIdleTimeout(0) - select { - case <-spdyConn.CloseChan(): - t.Fatal("Unexpected connection closure") - case <-time.After(20 * time.Millisecond): - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestIdleNoData(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy 
connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - spdyConn.SetIdleTimeout(10 * time.Millisecond) - <-spdyConn.CloseChan() - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestIdleWithData(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - spdyConn.SetIdleTimeout(25 * time.Millisecond) - - authenticated = true - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - - writeCh := make(chan struct{}) - - go func() { - b := []byte{1, 2, 3, 4, 5} - for i := 0; i < 10; i++ { - _, err = stream.Write(b) - if err != nil { - t.Fatalf("Error writing to stream: %v", err) - } - time.Sleep(10 * time.Millisecond) - } - close(writeCh) - }() - - writesFinished := false - -Loop: - for { - select { - case <-writeCh: - writesFinished = true - case <-spdyConn.CloseChan(): - if !writesFinished { - t.Fatal("Connection closed before all writes finished") - } - break Loop - } - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestIdleRace(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - spdyConn.SetIdleTimeout(10 * time.Millisecond) - - authenticated = true - - for i := 0; i < 10; i++ { - _, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - } - - <-spdyConn.CloseChan() - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestHalfClosedIdleTimeout(t *testing.T) { - listener, listenErr := net.Listen("tcp", "localhost:0") - if listenErr != nil { - t.Fatalf("Error listening: %v", listenErr) - } - listen := listener.Addr().String() - - go func() { - serverConn, err := listener.Accept() - if err != nil { - t.Fatalf("Error accepting: %v", err) - } - - serverSpdyConn, err := NewConnection(serverConn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go serverSpdyConn.Serve(func(s *Stream) { - s.SendReply(http.Header{}, true) - }) - serverSpdyConn.SetIdleTimeout(10 * time.Millisecond) - }() - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - - time.Sleep(20 * 
time.Millisecond) - - stream.Reset() - - err = spdyConn.Close() - if err != nil { - t.Fatalf("Error closing client spdy conn: %v", err) - } -} - -func TestStreamReset(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if streamErr != nil { - t.Fatalf("Error creating stream: %s", streamErr) - } - - buf := []byte("dskjahfkdusahfkdsahfkdsafdkas") - for i := 0; i < 10; i++ { - if _, err := stream.Write(buf); err != nil { - t.Fatalf("Error writing to stream: %s", err) - } - } - for i := 0; i < 10; i++ { - if _, err := stream.Read(buf); err != nil { - t.Fatalf("Error reading from stream: %s", err) - } - } - - // fmt.Printf("Resetting...\n") - if err := stream.Reset(); err != nil { - t.Fatalf("Error resetting stream: %s", err) - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -func TestStreamResetWithDataRemaining(t *testing.T) { - var wg sync.WaitGroup - server, listen, serverErr := runServer(&wg) - if serverErr != nil { - t.Fatalf("Error initializing server: %s", serverErr) - } - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - authenticated = true - stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) - if streamErr != nil { - t.Fatalf("Error creating stream: %s", streamErr) - } - - buf := []byte("dskjahfkdusahfkdsahfkdsafdkas") - for i := 0; i < 10; i++ { - if _, err := stream.Write(buf); err != nil { - t.Fatalf("Error writing to stream: %s", err) - } - } - - // read a bit to make sure a goroutine gets to <-dataChan - if _, err := stream.Read(buf); err != nil { - t.Fatalf("Error reading from stream: %s", err) - } - - // fmt.Printf("Resetting...\n") - if err := stream.Reset(); err != nil { - t.Fatalf("Error resetting stream: %s", err) - } - - closeErr := server.Close() - if closeErr != nil { - t.Fatalf("Error shutting down server: %s", closeErr) - } - wg.Wait() -} - -type roundTripper struct { - conn net.Conn -} - -func (s *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - r := *req - req = &r - - conn, err := net.Dial("tcp", req.URL.Host) - if err != nil { - return nil, err - } - - err = req.Write(conn) - if err != nil { - return nil, err - } - - resp, err := http.ReadResponse(bufio.NewReader(conn), req) - if err != nil { - return nil, err - } - - s.conn = conn - - return resp, nil -} - -// see https://github.com/GoogleCloudPlatform/kubernetes/issues/4882 -func TestFramingAfterRemoteConnectionClosed(t *testing.T) { - server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - streamCh := make(chan *Stream) - - w.WriteHeader(http.StatusSwitchingProtocols) - - netconn, _, _ := w.(http.Hijacker).Hijack() - conn, _ := NewConnection(netconn, true) - go conn.Serve(func(s *Stream) { - 
s.SendReply(http.Header{}, false) - streamCh <- s - }) - - stream := <-streamCh - io.Copy(stream, stream) - - closeChan := make(chan struct{}) - go func() { - stream.Reset() - conn.Close() - close(closeChan) - }() - - <-closeChan - })) - - server.Start() - defer server.Close() - - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("Error creating request: %s", err) - } - - rt := &roundTripper{} - client := &http.Client{Transport: rt} - - _, err = client.Do(req) - if err != nil { - t.Fatalf("unexpected error from client.Do: %s", err) - } - - conn, err := NewConnection(rt.conn, false) - if err != nil { - t.Fatalf("error creating client connection: %s", err) - } - go conn.Serve(NoOpStreamHandler) - - stream, err := conn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("error creating client stream: %s", err) - } - - n, err := stream.Write([]byte("hello")) - if err != nil { - t.Fatalf("error writing to stream: %s", err) - } - if n != 5 { - t.Fatalf("Expected to write 5 bytes, but actually wrote %d", n) - } - - b := make([]byte, 5) - n, err = stream.Read(b) - if err != nil { - t.Fatalf("error reading from stream: %s", err) - } - if n != 5 { - t.Fatalf("Expected to read 5 bytes, but actually read %d", n) - } - if e, a := "hello", string(b[0:n]); e != a { - t.Fatalf("expected '%s', got '%s'", e, a) - } - - stream.Reset() - conn.Close() -} - -func TestGoAwayRace(t *testing.T) { - var done sync.WaitGroup - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error listening: %v", err) - } - listen := listener.Addr().String() - - processDataFrame := make(chan struct{}) - serverClosed := make(chan struct{}) - - done.Add(1) - go func() { - defer done.Done() - serverConn, err := listener.Accept() - if err != nil { - t.Fatalf("Error accepting: %v", err) - } - - serverSpdyConn, err := NewConnection(serverConn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go func() { - <-serverSpdyConn.CloseChan() - close(serverClosed) - }() - - // force the data frame handler to sleep before delivering the frame - serverSpdyConn.dataFrameHandler = func(frame *spdy.DataFrame) error { - <-processDataFrame - return serverSpdyConn.handleDataFrame(frame) - } - - streamCh := make(chan *Stream) - go serverSpdyConn.Serve(func(s *Stream) { - s.SendReply(http.Header{}, false) - streamCh <- s - }) - - stream, ok := <-streamCh - if !ok { - t.Fatalf("didn't get a stream") - } - stream.Close() - data, err := ioutil.ReadAll(stream) - if err != nil { - t.Error(err) - } - if e, a := "hello1hello2hello3hello4hello5", string(data); e != a { - t.Errorf("Expected %q, got %q", e, a) - } - }() - - dialConn, err := net.Dial("tcp", listen) - if err != nil { - t.Fatalf("Error dialing server: %s", err) - } - conn, err := NewConnection(dialConn, false) - if err != nil { - t.Fatalf("Error creating client connection: %v", err) - } - go conn.Serve(NoOpStreamHandler) - - stream, err := conn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("error creating client stream: %s", err) - } - if err := stream.Wait(); err != nil { - t.Fatalf("error waiting for stream creation: %v", err) - } - - fmt.Fprint(stream, "hello1") - fmt.Fprint(stream, "hello2") - fmt.Fprint(stream, "hello3") - fmt.Fprint(stream, "hello4") - fmt.Fprint(stream, "hello5") - - stream.Close() - conn.Close() - - // wait for the server to get the go away frame - <-serverClosed - - // allow the data frames to be delivered to the server's stream - close(processDataFrame) - - done.Wait() -} - -func 
TestSetIdleTimeoutAfterRemoteConnectionClosed(t *testing.T) { - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error listening: %v", err) - } - listen := listener.Addr().String() - - serverConns := make(chan *Connection, 1) - go func() { - conn, connErr := listener.Accept() - if connErr != nil { - t.Fatal(connErr) - } - serverSpdyConn, err := NewConnection(conn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go serverSpdyConn.Serve(NoOpStreamHandler) - serverConns <- serverSpdyConn - }() - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - if err := spdyConn.Close(); err != nil { - t.Fatal(err) - } - - serverConn := <-serverConns - defer serverConn.Close() - <-serverConn.closeChan - - serverConn.SetIdleTimeout(10 * time.Second) -} - -func TestClientConnectionStopsServingAfterGoAway(t *testing.T) { - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error listening: %v", err) - } - listen := listener.Addr().String() - - serverConns := make(chan *Connection, 1) - go func() { - conn, connErr := listener.Accept() - if connErr != nil { - t.Fatal(connErr) - } - serverSpdyConn, err := NewConnection(conn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go serverSpdyConn.Serve(NoOpStreamHandler) - serverConns <- serverSpdyConn - }() - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - if err := stream.WaitTimeout(30 * time.Second); err != nil { - t.Fatalf("Timed out waiting for stream: %v", err) - } - - readChan := make(chan struct{}) - go func() { - _, err := ioutil.ReadAll(stream) - if err != nil { - t.Fatalf("Error reading stream: %v", err) - } - close(readChan) - }() - - serverConn := <-serverConns - serverConn.Close() - - // make sure the client conn breaks out of the main loop in Serve() - <-spdyConn.closeChan - // make sure the remote channels are closed and the stream read is unblocked - <-readChan -} - -func TestStreamReadUnblocksAfterCloseThenReset(t *testing.T) { - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error listening: %v", err) - } - listen := listener.Addr().String() - - serverConns := make(chan *Connection, 1) - go func() { - conn, connErr := listener.Accept() - if connErr != nil { - t.Fatal(connErr) - } - serverSpdyConn, err := NewConnection(conn, true) - if err != nil { - t.Fatalf("Error creating server connection: %v", err) - } - go serverSpdyConn.Serve(NoOpStreamHandler) - serverConns <- serverSpdyConn - }() - - conn, dialErr := net.Dial("tcp", listen) - if dialErr != nil { - t.Fatalf("Error dialing server: %s", dialErr) - } - - spdyConn, spdyErr := NewConnection(conn, false) - if spdyErr != nil { - t.Fatalf("Error creating spdy connection: %s", spdyErr) - } - go spdyConn.Serve(NoOpStreamHandler) - - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - 
t.Fatalf("Error creating stream: %v", err) - } - if err := stream.WaitTimeout(30 * time.Second); err != nil { - t.Fatalf("Timed out waiting for stream: %v", err) - } - - readChan := make(chan struct{}) - go func() { - _, err := ioutil.ReadAll(stream) - if err != nil { - t.Fatalf("Error reading stream: %v", err) - } - close(readChan) - }() - - serverConn := <-serverConns - defer serverConn.Close() - - if err := stream.Close(); err != nil { - t.Fatal(err) - } - if err := stream.Reset(); err != nil { - t.Fatal(err) - } - - // make sure close followed by reset unblocks stream.Read() - select { - case <-readChan: - case <-time.After(10 * time.Second): - t.Fatal("Timed out waiting for stream read to unblock") - } -} - -var authenticated bool - -func authStreamHandler(stream *Stream) { - if !authenticated { - stream.Refuse() - } - MirrorStreamHandler(stream) -} - -func runServer(wg *sync.WaitGroup) (io.Closer, string, error) { - listener, listenErr := net.Listen("tcp", "localhost:0") - if listenErr != nil { - return nil, "", listenErr - } - wg.Add(1) - go func() { - for { - conn, connErr := listener.Accept() - if connErr != nil { - break - } - - spdyConn, _ := NewConnection(conn, true) - go spdyConn.Serve(authStreamHandler) - - } - wg.Done() - }() - return listener, listener.Addr().String(), nil -} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/eapache/channels/.gitignore similarity index 100% rename from vendor/github.com/davecgh/go-spew/.gitignore rename to vendor/github.com/eapache/channels/.gitignore diff --git a/vendor/github.com/eapache/channels/.travis.yml b/vendor/github.com/eapache/channels/.travis.yml new file mode 100644 index 0000000000..b072a4c851 --- /dev/null +++ b/vendor/github.com/eapache/channels/.travis.yml @@ -0,0 +1,11 @@ +language: go +sudo: false + +script: go test -v -race -timeout 10s ./... + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 diff --git a/vendor/github.com/eapache/channels/CHANGELOG.md b/vendor/github.com/eapache/channels/CHANGELOG.md new file mode 100644 index 0000000000..63825cd2ff --- /dev/null +++ b/vendor/github.com/eapache/channels/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog + +#### Version 1.1.0 (2015-11-22) + +Bug Fixes: + - The `Len()` and `Cap()` methods on several implementations were racy + ([#18](https://github.com/eapache/channels/issues/18)). + +Note: Fixing the above issue led to a fairly substantial performance hit +(anywhere from 10-25% in benchmarks depending on use case) and involved fairly +major refactoring, which is why this is being released as v1.1.0 instead +of v1.0.1. + +#### Version 1.0.0 (2015-01-24) + +Version 1.0.0 is the first tagged release. All core functionality was available +at this point. 
diff --git a/vendor/github.com/eapache/channels/LICENSE b/vendor/github.com/eapache/channels/LICENSE new file mode 100644 index 0000000000..8c4bddf755 --- /dev/null +++ b/vendor/github.com/eapache/channels/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/eapache/channels/README.md b/vendor/github.com/eapache/channels/README.md new file mode 100644 index 0000000000..aab2a53a3b --- /dev/null +++ b/vendor/github.com/eapache/channels/README.md @@ -0,0 +1,27 @@ +channels +======== + +[![Build Status](https://travis-ci.org/eapache/channels.svg?branch=master)](https://travis-ci.org/eapache/channels) +[![GoDoc](https://godoc.org/github.com/eapache/channels?status.png)](https://godoc.org/github.com/eapache/channels) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A collection of helper functions and special types for working with and +extending [Go](https://golang.org/)'s existing channels. Due to limitations +of Go's type system, importing this library directly is often not practical for +production code. It serves equally well, however, as a reference guide and +template for implementing many common idioms; if you use it in this way I would +appreciate the inclusion of some sort of credit in the resulting code. + +See https://godoc.org/github.com/eapache/channels for full documentation or +https://gopkg.in/eapache/channels.v1 for a versioned import path. + +Requires Go version 1.1 or later, as certain necessary elements of the `reflect` +package were not present in 1.0. + +Most of the buffered channel types in this package are backed by a very fast +queue implementation that used to be built into this package but has now been +extracted into its own package at https://github.com/eapache/queue. + +*Note:* Several types in this package provide so-called "infinite" buffers. Be +very careful using these, as no buffer is truly infinite. If such a buffer +grows too large your program will run out of memory and crash. Caveat emptor. 
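The README above is intentionally conceptual, so a short usage sketch may help. It assumes only the exported API vendored below (`NewInfiniteChannel`, `In()`, `Out()`, `Close()`) and uses this repository's vendored import path; adjust the path for your own module.

```go
package main

import (
	"fmt"

	"github.com/eapache/channels"
)

func main() {
	// An "infinite" buffer: writes never block waiting for a reader
	// (but see the memory caveat in the README above).
	ch := channels.NewInfiniteChannel()
	for i := 0; i < 5; i++ {
		ch.In() <- i
	}
	ch.Close() // Out() is closed once the buffer drains

	for v := range ch.Out() {
		fmt.Println(v.(int)) // values arrive as interface{}, so assert back to int
	}
}
```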
diff --git a/vendor/github.com/eapache/channels/batching_channel.go b/vendor/github.com/eapache/channels/batching_channel.go new file mode 100644 index 0000000000..5be622f2f3 --- /dev/null +++ b/vendor/github.com/eapache/channels/batching_channel.go @@ -0,0 +1,87 @@ +package channels + +// BatchingChannel implements the Channel interface, with the change that instead of producing individual elements +// on Out(), it batches together the entire internal buffer each time. Trying to construct an unbuffered batching channel +// will panic; that configuration is not supported (and provides no benefit over an unbuffered NativeChannel). +type BatchingChannel struct { + input, output chan interface{} + length chan int + buffer []interface{} + size BufferCap +} + +func NewBatchingChannel(size BufferCap) *BatchingChannel { + if size == None { + panic("channels: BatchingChannel does not support unbuffered behaviour") + } + if size < 0 && size != Infinity { + panic("channels: invalid negative size in NewBatchingChannel") + } + ch := &BatchingChannel{ + input: make(chan interface{}), + output: make(chan interface{}), + length: make(chan int), + size: size, + } + go ch.batchingBuffer() + return ch +} + +func (ch *BatchingChannel) In() chan<- interface{} { + return ch.input +} + +// Out returns a <-chan interface{} in order that BatchingChannel conforms to the standard Channel interface provided +// by this package; however, each output value is guaranteed to be of type []interface{} - a slice collecting the most +// recent batch of values sent on the In channel. The slice is guaranteed to not be empty or nil. In practice the net +// result is that you need an additional type assertion to access the underlying values. +func (ch *BatchingChannel) Out() <-chan interface{} { + return ch.output +} + +func (ch *BatchingChannel) Len() int { + return <-ch.length +} + +func (ch *BatchingChannel) Cap() BufferCap { + return ch.size +} + +func (ch *BatchingChannel) Close() { + close(ch.input) +} + +func (ch *BatchingChannel) batchingBuffer() { + var input, output, nextInput chan interface{} + nextInput = ch.input + input = nextInput + + for input != nil || output != nil { + select { + case elem, open := <-input: + if open { + ch.buffer = append(ch.buffer, elem) + } else { + input = nil + nextInput = nil + } + case output <- ch.buffer: + ch.buffer = nil + case ch.length <- len(ch.buffer): + } + + if len(ch.buffer) == 0 { + input = nextInput + output = nil + } else if ch.size != Infinity && len(ch.buffer) >= int(ch.size) { + input = nil + output = ch.output + } else { + input = nextInput + output = ch.output + } + } + + close(ch.output) + close(ch.length) +} diff --git a/vendor/github.com/eapache/channels/black_hole.go b/vendor/github.com/eapache/channels/black_hole.go new file mode 100644 index 0000000000..0d1ba97b3d --- /dev/null +++ b/vendor/github.com/eapache/channels/black_hole.go @@ -0,0 +1,54 @@ +package channels + +// BlackHole implements the InChannel interface and provides an analogue for the "Discard" variable in +// the ioutil package - it never blocks, and simply discards every value it reads. The number of items +// discarded in this way is counted and returned from Len. 
+type BlackHole struct { + input chan interface{} + length chan int + count int +} + +func NewBlackHole() *BlackHole { + ch := &BlackHole{ + input: make(chan interface{}), + length: make(chan int), + } + go ch.discard() + return ch +} + +func (ch *BlackHole) In() chan<- interface{} { + return ch.input +} + +func (ch *BlackHole) Len() int { + val, open := <-ch.length + if open { + return val + } else { + return ch.count + } +} + +func (ch *BlackHole) Cap() BufferCap { + return Infinity +} + +func (ch *BlackHole) Close() { + close(ch.input) +} + +func (ch *BlackHole) discard() { + for { + select { + case _, open := <-ch.input: + if !open { + close(ch.length) + return + } + ch.count++ + case ch.length <- ch.count: + } + } +} diff --git a/vendor/github.com/eapache/channels/channels.go b/vendor/github.com/eapache/channels/channels.go new file mode 100644 index 0000000000..efcb2b5c50 --- /dev/null +++ b/vendor/github.com/eapache/channels/channels.go @@ -0,0 +1,277 @@ +/* +Package channels provides a collection of helper functions, interfaces and implementations for +working with and extending the capabilities of golang's existing channels. The main interface of +interest is Channel, though sub-interfaces are also provided for cases where the full Channel interface +cannot be met (for example, InChannel for write-only channels). + +For integration with native typed golang channels, functions Wrap and Unwrap are provided which do the +appropriate type conversions. The NativeChannel, NativeInChannel and NativeOutChannel type definitions +are also provided for use with native channels which already carry values of type interface{}. + +The heart of the package consists of several distinct implementations of the Channel interface, including +channels backed by special buffers (resizable, infinite, ring buffers, etc) and other useful types. A +"black hole" channel for discarding unwanted values (similar in purpose to ioutil.Discard or /dev/null) +rounds out the set. + +Helper functions for operating on Channels include Pipe and Tee (which behave much like their Unix +namesakes), as well as Multiplex and Distribute. "Weak" versions of these functions also exist, which +do not close their output channel(s) on completion. + +Due to limitations of Go's type system, importing this library directly is often not practical for +production code. It serves equally well, however, as a reference guide and template for implementing +many common idioms; if you use it in this way I would appreciate the inclusion of some sort of credit +in the resulting code. + +Warning: several types in this package provide so-called "infinite" buffers. Be *very* careful using +these, as no buffer is truly infinite - if such a buffer grows too large your program will run out of +memory and crash. Caveat emptor. +*/ +package channels + +import "reflect" + +// BufferCap represents the capacity of the buffer backing a channel. Valid values consist of all +// positive integers, as well as the special values below. +type BufferCap int + +const ( + // None is the capacity for channels that have no buffer at all. + None BufferCap = 0 + // Infinity is the capacity for channels with no limit on their buffer size. + Infinity BufferCap = -1 +) + +// Buffer is an interface for any channel that provides access to query the state of its buffer. +// Even unbuffered channels can implement this interface by simply returning 0 from Len() and None from Cap(). 
+type Buffer interface { + Len() int // The number of elements currently buffered. + Cap() BufferCap // The maximum number of elements that can be buffered. +} + +// SimpleInChannel is an interface representing a writeable channel that does not necessarily +// implement the Buffer interface. +type SimpleInChannel interface { + In() chan<- interface{} // The writeable end of the channel. + Close() // Closes the channel. It is an error to write to In() after calling Close(). +} + +// InChannel is an interface representing a writeable channel with a buffer. +type InChannel interface { + SimpleInChannel + Buffer +} + +// SimpleOutChannel is an interface representing a readable channel that does not necessarily +// implement the Buffer interface. +type SimpleOutChannel interface { + Out() <-chan interface{} // The readable end of the channel. +} + +// OutChannel is an interface representing a readable channel implementing the Buffer interface. +type OutChannel interface { + SimpleOutChannel + Buffer +} + +// SimpleChannel is an interface representing a channel that is both readable and writeable, +// but does not necessarily implement the Buffer interface. +type SimpleChannel interface { + SimpleInChannel + SimpleOutChannel +} + +// Channel is an interface representing a channel that is readable, writeable and implements +// the Buffer interface +type Channel interface { + SimpleChannel + Buffer +} + +func pipe(input SimpleOutChannel, output SimpleInChannel, closeWhenDone bool) { + for elem := range input.Out() { + output.In() <- elem + } + if closeWhenDone { + output.Close() + } +} + +func multiplex(output SimpleInChannel, inputs []SimpleOutChannel, closeWhenDone bool) { + inputCount := len(inputs) + cases := make([]reflect.SelectCase, inputCount) + for i := range cases { + cases[i].Dir = reflect.SelectRecv + cases[i].Chan = reflect.ValueOf(inputs[i].Out()) + } + for inputCount > 0 { + chosen, recv, recvOK := reflect.Select(cases) + if recvOK { + output.In() <- recv.Interface() + } else { + cases[chosen].Chan = reflect.ValueOf(nil) + inputCount-- + } + } + if closeWhenDone { + output.Close() + } +} + +func tee(input SimpleOutChannel, outputs []SimpleInChannel, closeWhenDone bool) { + cases := make([]reflect.SelectCase, len(outputs)) + for i := range cases { + cases[i].Dir = reflect.SelectSend + } + for elem := range input.Out() { + for i := range cases { + cases[i].Chan = reflect.ValueOf(outputs[i].In()) + cases[i].Send = reflect.ValueOf(elem) + } + for _ = range cases { + chosen, _, _ := reflect.Select(cases) + cases[chosen].Chan = reflect.ValueOf(nil) + } + } + if closeWhenDone { + for i := range outputs { + outputs[i].Close() + } + } +} + +func distribute(input SimpleOutChannel, outputs []SimpleInChannel, closeWhenDone bool) { + cases := make([]reflect.SelectCase, len(outputs)) + for i := range cases { + cases[i].Dir = reflect.SelectSend + cases[i].Chan = reflect.ValueOf(outputs[i].In()) + } + for elem := range input.Out() { + for i := range cases { + cases[i].Send = reflect.ValueOf(elem) + } + reflect.Select(cases) + } + if closeWhenDone { + for i := range outputs { + outputs[i].Close() + } + } +} + +// Pipe connects the input channel to the output channel so that +// they behave as if a single channel. +func Pipe(input SimpleOutChannel, output SimpleInChannel) { + go pipe(input, output, true) +} + +// Multiplex takes an arbitrary number of input channels and multiplexes their output into a single output +// channel. When all input channels have been closed, the output channel is closed. 
Multiplex with a single +// input channel is equivalent to Pipe (though slightly less efficient). +func Multiplex(output SimpleInChannel, inputs ...SimpleOutChannel) { + if len(inputs) == 0 { + panic("channels: Multiplex requires at least one input") + } + go multiplex(output, inputs, true) +} + +// Tee (like its Unix namesake) takes a single input channel and an arbitrary number of output channels +// and duplicates each input into every output. When the input channel is closed, all output channels are closed. +// Tee with a single output channel is equivalent to Pipe (though slightly less efficient). +func Tee(input SimpleOutChannel, outputs ...SimpleInChannel) { + if len(outputs) == 0 { + panic("channels: Tee requires at least one output") + } + go tee(input, outputs, true) +} + +// Distribute takes a single input channel and an arbitrary number of output channels and duplicates each input +// into *one* available output. If multiple outputs are waiting for a value, one is chosen at random. When the +// input channel is closed, all output channels are closed. Distribute with a single output channel is +// equivalent to Pipe (though slightly less efficient). +func Distribute(input SimpleOutChannel, outputs ...SimpleInChannel) { + if len(outputs) == 0 { + panic("channels: Distribute requires at least one output") + } + go distribute(input, outputs, true) +} + +// WeakPipe behaves like Pipe (connecting the two channels) except that it does not close +// the output channel when the input channel is closed. +func WeakPipe(input SimpleOutChannel, output SimpleInChannel) { + go pipe(input, output, false) +} + +// WeakMultiplex behaves like Multiplex (multiplexing multiple inputs into a single output) except that it does not close +// the output channel when the input channels are closed. +func WeakMultiplex(output SimpleInChannel, inputs ...SimpleOutChannel) { + if len(inputs) == 0 { + panic("channels: WeakMultiplex requires at least one input") + } + go multiplex(output, inputs, false) +} + +// WeakTee behaves like Tee (duplicating a single input into multiple outputs) except that it does not close +// the output channels when the input channel is closed. +func WeakTee(input SimpleOutChannel, outputs ...SimpleInChannel) { + if len(outputs) == 0 { + panic("channels: WeakTee requires at least one output") + } + go tee(input, outputs, false) +} + +// WeakDistribute behaves like Distribute (distributing a single input amongst multiple outputs) except that +// it does not close the output channels when the input channel is closed. +func WeakDistribute(input SimpleOutChannel, outputs ...SimpleInChannel) { + if len(outputs) == 0 { + panic("channels: WeakDistribute requires at least one output") + } + go distribute(input, outputs, false) +} + +// Wrap takes any readable channel type (chan or <-chan but not chan<-) and +// exposes it as a SimpleOutChannel for easy integration with existing channel sources. +// It panics if the input is not a readable channel. 
+func Wrap(ch interface{}) SimpleOutChannel { + t := reflect.TypeOf(ch) + if t.Kind() != reflect.Chan || t.ChanDir()&reflect.RecvDir == 0 { + panic("channels: input to Wrap must be readable channel") + } + realChan := make(chan interface{}) + + go func() { + v := reflect.ValueOf(ch) + for { + x, ok := v.Recv() + if !ok { + close(realChan) + return + } + realChan <- x.Interface() + } + }() + + return NativeOutChannel(realChan) +} + +// Unwrap takes a SimpleOutChannel and uses reflection to pipe it to a typed native channel for +// easy integration with existing channel sources. Output can be any writable channel type (chan or chan<-). +// It panics if the output is not a writable channel, or if a value is received that cannot be sent on the +// output channel. +func Unwrap(input SimpleOutChannel, output interface{}) { + t := reflect.TypeOf(output) + if t.Kind() != reflect.Chan || t.ChanDir()&reflect.SendDir == 0 { + panic("channels: output to Unwrap must be a writable channel") + } + + go func() { + v := reflect.ValueOf(output) + for { + x, ok := <-input.Out() + if !ok { + v.Close() + return + } + v.Send(reflect.ValueOf(x)) + } + }() +} diff --git a/vendor/github.com/eapache/channels/infinite_channel.go b/vendor/github.com/eapache/channels/infinite_channel.go new file mode 100644 index 0000000000..3aa9e8e7eb --- /dev/null +++ b/vendor/github.com/eapache/channels/infinite_channel.go @@ -0,0 +1,72 @@ +package channels + +import "github.com/eapache/queue" + +// InfiniteChannel implements the Channel interface with an infinite buffer between the input and the output. +type InfiniteChannel struct { + input, output chan interface{} + length chan int + buffer *queue.Queue +} + +func NewInfiniteChannel() *InfiniteChannel { + ch := &InfiniteChannel{ + input: make(chan interface{}), + output: make(chan interface{}), + length: make(chan int), + buffer: queue.New(), + } + go ch.infiniteBuffer() + return ch +} + +func (ch *InfiniteChannel) In() chan<- interface{} { + return ch.input +} + +func (ch *InfiniteChannel) Out() <-chan interface{} { + return ch.output +} + +func (ch *InfiniteChannel) Len() int { + return <-ch.length +} + +func (ch *InfiniteChannel) Cap() BufferCap { + return Infinity +} + +func (ch *InfiniteChannel) Close() { + close(ch.input) +} + +func (ch *InfiniteChannel) infiniteBuffer() { + var input, output chan interface{} + var next interface{} + input = ch.input + + for input != nil || output != nil { + select { + case elem, open := <-input: + if open { + ch.buffer.Add(elem) + } else { + input = nil + } + case output <- next: + ch.buffer.Remove() + case ch.length <- ch.buffer.Length(): + } + + if ch.buffer.Length() > 0 { + output = ch.output + next = ch.buffer.Peek() + } else { + output = nil + next = nil + } + } + + close(ch.output) + close(ch.length) +} diff --git a/vendor/github.com/eapache/channels/native_channel.go b/vendor/github.com/eapache/channels/native_channel.go new file mode 100644 index 0000000000..3807a19915 --- /dev/null +++ b/vendor/github.com/eapache/channels/native_channel.go @@ -0,0 +1,92 @@ +package channels + +// NativeInChannel implements the InChannel interface by wrapping a native go write-only channel. 
+type NativeInChannel chan<- interface{} + +func (ch NativeInChannel) In() chan<- interface{} { + return ch +} + +func (ch NativeInChannel) Len() int { + return len(ch) +} + +func (ch NativeInChannel) Cap() BufferCap { + return BufferCap(cap(ch)) +} + +func (ch NativeInChannel) Close() { + close(ch) +} + +// NativeOutChannel implements the OutChannel interface by wrapping a native go read-only channel. +type NativeOutChannel <-chan interface{} + +func (ch NativeOutChannel) Out() <-chan interface{} { + return ch +} + +func (ch NativeOutChannel) Len() int { + return len(ch) +} + +func (ch NativeOutChannel) Cap() BufferCap { + return BufferCap(cap(ch)) +} + +// NativeChannel implements the Channel interface by wrapping a native go channel. +type NativeChannel chan interface{} + +// NewNativeChannel makes a new NativeChannel with the given buffer size. Just a convenience wrapper +// to avoid having to cast the result of make(). +func NewNativeChannel(size BufferCap) NativeChannel { + return make(chan interface{}, size) +} + +func (ch NativeChannel) In() chan<- interface{} { + return ch +} + +func (ch NativeChannel) Out() <-chan interface{} { + return ch +} + +func (ch NativeChannel) Len() int { + return len(ch) +} + +func (ch NativeChannel) Cap() BufferCap { + return BufferCap(cap(ch)) +} + +func (ch NativeChannel) Close() { + close(ch) +} + +// DeadChannel is a placeholder implementation of the Channel interface with no buffer +// that is never ready for reading or writing. Closing a dead channel is a no-op. +// Behaves almost like NativeChannel(nil) except that closing a nil NativeChannel will panic. +type DeadChannel struct{} + +func NewDeadChannel() DeadChannel { + return DeadChannel{} +} + +func (ch DeadChannel) In() chan<- interface{} { + return nil +} + +func (ch DeadChannel) Out() <-chan interface{} { + return nil +} + +func (ch DeadChannel) Len() int { + return 0 +} + +func (ch DeadChannel) Cap() BufferCap { + return BufferCap(0) +} + +func (ch DeadChannel) Close() { +} diff --git a/vendor/github.com/eapache/channels/overflowing_channel.go b/vendor/github.com/eapache/channels/overflowing_channel.go new file mode 100644 index 0000000000..35090f8e85 --- /dev/null +++ b/vendor/github.com/eapache/channels/overflowing_channel.go @@ -0,0 +1,113 @@ +package channels + +import "github.com/eapache/queue" + +// OverflowingChannel implements the Channel interface in a way that never blocks the writer. +// Specifically, if a value is written to an OverflowingChannel when its buffer is full +// (or, in an unbuffered case, when the recipient is not ready) then that value is simply discarded. +// Note that Go's scheduler can cause discarded values when they could be avoided, simply by scheduling +// the writer before the reader, so caveat emptor. +// For the opposite behaviour (discarding the oldest element, not the newest) see RingChannel. 
+type OverflowingChannel struct { + input, output chan interface{} + length chan int + buffer *queue.Queue + size BufferCap +} + +func NewOverflowingChannel(size BufferCap) *OverflowingChannel { + if size < 0 && size != Infinity { + panic("channels: invalid negative size in NewOverflowingChannel") + } + ch := &OverflowingChannel{ + input: make(chan interface{}), + output: make(chan interface{}), + length: make(chan int), + size: size, + } + if size == None { + go ch.overflowingDirect() + } else { + ch.buffer = queue.New() + go ch.overflowingBuffer() + } + return ch +} + +func (ch *OverflowingChannel) In() chan<- interface{} { + return ch.input +} + +func (ch *OverflowingChannel) Out() <-chan interface{} { + return ch.output +} + +func (ch *OverflowingChannel) Len() int { + if ch.size == None { + return 0 + } else { + return <-ch.length + } +} + +func (ch *OverflowingChannel) Cap() BufferCap { + return ch.size +} + +func (ch *OverflowingChannel) Close() { + close(ch.input) +} + +// for entirely unbuffered cases +func (ch *OverflowingChannel) overflowingDirect() { + for elem := range ch.input { + // if we can't write it immediately, drop it and move on + select { + case ch.output <- elem: + default: + } + } + close(ch.output) +} + +// for all buffered cases +func (ch *OverflowingChannel) overflowingBuffer() { + var input, output chan interface{} + var next interface{} + input = ch.input + + for input != nil || output != nil { + select { + // Prefer to write if possible, which is surprisingly effective in reducing + // dropped elements due to overflow. The naive read/write select chooses randomly + // when both channels are ready, which produces unnecessary drops 50% of the time. + case output <- next: + ch.buffer.Remove() + default: + select { + case elem, open := <-input: + if open { + if ch.size == Infinity || ch.buffer.Length() < int(ch.size) { + ch.buffer.Add(elem) + } + } else { + input = nil + } + case output <- next: + ch.buffer.Remove() + case ch.length <- ch.buffer.Length(): + } + } + + if ch.buffer.Length() > 0 { + output = ch.output + next = ch.buffer.Peek() + } else { + output = nil + next = nil + } + } + + close(ch.output) + close(ch.length) +} diff --git a/vendor/github.com/eapache/channels/resizable_channel.go b/vendor/github.com/eapache/channels/resizable_channel.go new file mode 100644 index 0000000000..fafed0a29b --- /dev/null +++ b/vendor/github.com/eapache/channels/resizable_channel.go @@ -0,0 +1,109 @@ +package channels + +import "github.com/eapache/queue" + +// ResizableChannel implements the Channel interface with a resizable buffer between the input and the output. +// The channel initially has a buffer size of 1, but can be resized by calling Resize(). +// +// Resizing to a buffer capacity of None is, unfortunately, not supported and will panic +// (see https://github.com/eapache/channels/issues/1). +// Resizing back and forth between a finite and infinite buffer is fully supported. 
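+//
+// A small sketch (illustrative only):
+//
+//	ch := NewResizableChannel() // starts with a buffer size of 1
+//	ch.Resize(BufferCap(64))    // grow to a finite buffer of 64
+//	ch.Resize(Infinity)         // or switch to an unbounded buffer
+//	ch.In() <- "work"
+//	v := <-ch.Out() // "work"
+//	ch.Close()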
+type ResizableChannel struct { + input, output chan interface{} + length chan int + capacity, resize chan BufferCap + size BufferCap + buffer *queue.Queue +} + +func NewResizableChannel() *ResizableChannel { + ch := &ResizableChannel{ + input: make(chan interface{}), + output: make(chan interface{}), + length: make(chan int), + capacity: make(chan BufferCap), + resize: make(chan BufferCap), + size: 1, + buffer: queue.New(), + } + go ch.magicBuffer() + return ch +} + +func (ch *ResizableChannel) In() chan<- interface{} { + return ch.input +} + +func (ch *ResizableChannel) Out() <-chan interface{} { + return ch.output +} + +func (ch *ResizableChannel) Len() int { + return <-ch.length +} + +func (ch *ResizableChannel) Cap() BufferCap { + val, open := <-ch.capacity + if open { + return val + } else { + return ch.size + } +} + +func (ch *ResizableChannel) Close() { + close(ch.input) +} + +func (ch *ResizableChannel) Resize(newSize BufferCap) { + if newSize == None { + panic("channels: ResizableChannel does not support unbuffered behaviour") + } + if newSize < 0 && newSize != Infinity { + panic("channels: invalid negative size trying to resize channel") + } + ch.resize <- newSize +} + +func (ch *ResizableChannel) magicBuffer() { + var input, output, nextInput chan interface{} + var next interface{} + nextInput = ch.input + input = nextInput + + for input != nil || output != nil { + select { + case elem, open := <-input: + if open { + ch.buffer.Add(elem) + } else { + input = nil + nextInput = nil + } + case output <- next: + ch.buffer.Remove() + case ch.size = <-ch.resize: + case ch.length <- ch.buffer.Length(): + case ch.capacity <- ch.size: + } + + if ch.buffer.Length() == 0 { + output = nil + next = nil + } else { + output = ch.output + next = ch.buffer.Peek() + } + + if ch.size != Infinity && ch.buffer.Length() >= int(ch.size) { + input = nil + } else { + input = nextInput + } + } + + close(ch.output) + close(ch.resize) + close(ch.length) + close(ch.capacity) +} diff --git a/vendor/github.com/eapache/channels/ring_channel.go b/vendor/github.com/eapache/channels/ring_channel.go new file mode 100644 index 0000000000..7aec207bdf --- /dev/null +++ b/vendor/github.com/eapache/channels/ring_channel.go @@ -0,0 +1,114 @@ +package channels + +import "github.com/eapache/queue" + +// RingChannel implements the Channel interface in a way that never blocks the writer. +// Specifically, if a value is written to a RingChannel when its buffer is full then the oldest +// value in the buffer is discarded to make room (just like a standard ring-buffer). +// Note that Go's scheduler can cause discarded values when they could be avoided, simply by scheduling +// the writer before the reader, so caveat emptor. +// For the opposite behaviour (discarding the newest element, not the oldest) see OverflowingChannel. 
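+//
+// A short sketch of the drop-oldest behaviour (illustrative only):
+//
+//	ch := NewRingChannel(BufferCap(1))
+//	ch.In() <- 1    // fills the buffer
+//	ch.In() <- 2    // 1 is discarded to make room for 2
+//	v := <-ch.Out() // v == 2
+//	ch.Close()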
+type RingChannel struct { + input, output chan interface{} + length chan int + buffer *queue.Queue + size BufferCap +} + +func NewRingChannel(size BufferCap) *RingChannel { + if size < 0 && size != Infinity { + panic("channels: invalid negative size in NewRingChannel") + } + ch := &RingChannel{ + input: make(chan interface{}), + output: make(chan interface{}), + buffer: queue.New(), + size: size, + } + if size == None { + go ch.overflowingDirect() + } else { + ch.length = make(chan int) + go ch.ringBuffer() + } + return ch +} + +func (ch *RingChannel) In() chan<- interface{} { + return ch.input +} + +func (ch *RingChannel) Out() <-chan interface{} { + return ch.output +} + +func (ch *RingChannel) Len() int { + if ch.size == None { + return 0 + } else { + return <-ch.length + } +} + +func (ch *RingChannel) Cap() BufferCap { + return ch.size +} + +func (ch *RingChannel) Close() { + close(ch.input) +} + +// for entirely unbuffered cases +func (ch *RingChannel) overflowingDirect() { + for elem := range ch.input { + // if we can't write it immediately, drop it and move on + select { + case ch.output <- elem: + default: + } + } + close(ch.output) +} + +// for all buffered cases +func (ch *RingChannel) ringBuffer() { + var input, output chan interface{} + var next interface{} + input = ch.input + + for input != nil || output != nil { + select { + // Prefer to write if possible, which is surprisingly effective in reducing + // dropped elements due to overflow. The naive read/write select chooses randomly + // when both channels are ready, which produces unnecessary drops 50% of the time. + case output <- next: + ch.buffer.Remove() + default: + select { + case elem, open := <-input: + if open { + ch.buffer.Add(elem) + if ch.size != Infinity && ch.buffer.Length() > int(ch.size) { + ch.buffer.Remove() + } + } else { + input = nil + } + case output <- next: + ch.buffer.Remove() + case ch.length <- ch.buffer.Length(): + } + } + + if ch.buffer.Length() > 0 { + output = ch.output + next = ch.buffer.Peek() + } else { + output = nil + next = nil + } + } + + close(ch.output) + close(ch.length) +} diff --git a/vendor/github.com/eapache/channels/shared_buffer.go b/vendor/github.com/eapache/channels/shared_buffer.go new file mode 100644 index 0000000000..556dc190a1 --- /dev/null +++ b/vendor/github.com/eapache/channels/shared_buffer.go @@ -0,0 +1,167 @@ +package channels + +import ( + "reflect" + + "github.com/eapache/queue" +) + +//sharedBufferChannel implements SimpleChannel and is created by the public +//SharedBuffer type below +type sharedBufferChannel struct { + in chan interface{} + out chan interface{} + buf *queue.Queue + closed bool +} + +func (sch *sharedBufferChannel) In() chan<- interface{} { + return sch.in +} + +func (sch *sharedBufferChannel) Out() <-chan interface{} { + return sch.out +} + +func (sch *sharedBufferChannel) Close() { + close(sch.in) +} + +//SharedBuffer implements the Buffer interface, and permits multiple SimpleChannel instances to "share" a single buffer. +//Each channel spawned by NewChannel has its own internal queue (so values flowing through do not get mixed up with +//other channels) but the total number of elements buffered by all spawned channels is limited to a single capacity. This +//means *all* such channels block and unblock for writing together. 
The primary use case is for implementing pipeline-style +//parallelism with goroutines, limiting the total number of elements in the pipeline without limiting the number of elements +//at any particular step. +type SharedBuffer struct { + cases []reflect.SelectCase // 2n+1 of these; [0] is for control, [1,3,5...] for recv, [2,4,6...] for send + chans []*sharedBufferChannel // n of these + count int + size BufferCap + in chan *sharedBufferChannel +} + +func NewSharedBuffer(size BufferCap) *SharedBuffer { + if size < 0 && size != Infinity { + panic("channels: invalid negative size in NewSharedBuffer") + } else if size == None { + panic("channels: SharedBuffer does not support unbuffered behaviour") + } + + buf := &SharedBuffer{ + size: size, + in: make(chan *sharedBufferChannel), + } + + buf.cases = append(buf.cases, reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(buf.in), + }) + + go buf.mainLoop() + + return buf +} + +//NewChannel spawns and returns a new channel sharing the underlying buffer. +func (buf *SharedBuffer) NewChannel() SimpleChannel { + ch := &sharedBufferChannel{ + in: make(chan interface{}), + out: make(chan interface{}), + buf: queue.New(), + } + buf.in <- ch + return ch +} + +//Close shuts down the SharedBuffer. It is an error to call Close while channels are still using +//the buffer (I'm not really sure what would happen if you do so). +func (buf *SharedBuffer) Close() { + // TODO: what if there are still active channels using this buffer? + close(buf.in) +} + +func (buf *SharedBuffer) mainLoop() { + for { + i, val, ok := reflect.Select(buf.cases) + + if i == 0 { + if !ok { + //Close was called on the SharedBuffer itself + return + } + + //NewChannel was called on the SharedBuffer + ch := val.Interface().(*sharedBufferChannel) + buf.chans = append(buf.chans, ch) + buf.cases = append(buf.cases, + reflect.SelectCase{Dir: reflect.SelectRecv}, + reflect.SelectCase{Dir: reflect.SelectSend}, + ) + if buf.size == Infinity || buf.count < int(buf.size) { + buf.cases[len(buf.cases)-2].Chan = reflect.ValueOf(ch.in) + } + } else if i%2 == 0 { + //Send + if buf.count == int(buf.size) { + //room in the buffer again, re-enable all recv cases + for j := range buf.chans { + if !buf.chans[j].closed { + buf.cases[(j*2)+1].Chan = reflect.ValueOf(buf.chans[j].in) + } + } + } + buf.count-- + ch := buf.chans[(i-1)/2] + if ch.buf.Length() > 0 { + buf.cases[i].Send = reflect.ValueOf(ch.buf.Peek()) + ch.buf.Remove() + } else { + //nothing left for this channel to send, disable sending + buf.cases[i].Chan = reflect.Value{} + buf.cases[i].Send = reflect.Value{} + if ch.closed { + // and it was closed, so close the output channel + //TODO: shrink slice + close(ch.out) + } + } + } else { + ch := buf.chans[i/2] + if ok { + //Receive + buf.count++ + if ch.buf.Length() == 0 && !buf.cases[i+1].Chan.IsValid() { + //this channel now has something to send + buf.cases[i+1].Chan = reflect.ValueOf(ch.out) + buf.cases[i+1].Send = val + } else { + ch.buf.Add(val.Interface()) + } + if buf.count == int(buf.size) { + //buffer full, disable recv cases + for j := range buf.chans { + buf.cases[(j*2)+1].Chan = reflect.Value{} + } + } + } else { + //Close + buf.cases[i].Chan = reflect.Value{} + ch.closed = true + if ch.buf.Length() == 0 && !buf.cases[i+1].Chan.IsValid() { + //nothing pending, close the out channel right away + //TODO: shrink slice + close(ch.out) + } + } + } + } +} + +func (buf *SharedBuffer) Len() int { + return buf.count +} + +func (buf *SharedBuffer) Cap() BufferCap { + return 
buf.size +} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/github.com/eapache/queue/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml new file mode 100644 index 0000000000..235a40a493 --- /dev/null +++ b/vendor/github.com/eapache/queue/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false + +go: + - 1.2 + - 1.3 + - 1.4 diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE new file mode 100644 index 0000000000..d5f36dbcaa --- /dev/null +++ b/vendor/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md new file mode 100644 index 0000000000..8e782335cd --- /dev/null +++ b/vendor/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. 
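+
+A minimal usage sketch (illustrative only):
+
+    q := queue.New()
+    q.Add("a")
+    q.Add("b")
+    front := q.Peek()  // "a", without removing it
+    front = q.Remove() // "a"; the queue now holds only "b"
+    n := q.Length()    // 1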
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go new file mode 100644 index 0000000000..71d1acdf27 --- /dev/null +++ b/vendor/github.com/eapache/queue/queue.go @@ -0,0 +1,102 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +// minQueueLen is smallest capacity that queue may have. +// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. +type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. +func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count<<1) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + // bitwise modulus + q.tail = (q.tail + 1) & (len(q.buf) - 1) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. If the index is +// invalid, the call will panic. This method accepts both positive and +// negative index values. Index 0 refers to the first element, and +// index -1 refers to the last. +func (q *Queue) Get(i int) interface{} { + // If indexing backwards, convert to positive index. + if i < 0 { + i += q.count + } + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + // bitwise modulus + return q.buf[(q.head+i)&(len(q.buf)-1)] +} + +// Remove removes and returns the element from the front of the queue. If the +// queue is empty, the call will panic. +func (q *Queue) Remove() interface{} { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + ret := q.buf[q.head] + q.buf[q.head] = nil + // bitwise modulus + q.head = (q.head + 1) & (len(q.buf) - 1) + q.count-- + // Resize down if buffer 1/4 full. 
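+	// (q.count << 2) is the bitwise form of q.count*4, so this fires only when
+	// the queue is exactly one-quarter full; resize() then halves the backing
+	// slice (to 2*q.count), and the len(q.buf) > minQueueLen guard keeps the
+	// buffer from ever shrinking below its minimum capacity.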
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/.travis.yml b/vendor/github.com/emicklei/go-restful-swagger12/.travis.yml deleted file mode 100644 index c74e4fa57a..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go - -go: - - 1.x \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/CHANGES.md b/vendor/github.com/emicklei/go-restful-swagger12/CHANGES.md deleted file mode 100644 index 213b8e7b3c..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/CHANGES.md +++ /dev/null @@ -1,46 +0,0 @@ -Change history of swagger -= -2017-01-30 -- moved from go-restful/swagger to go-restful-swagger12 - -2015-10-16 -- add type override mechanism for swagger models (MR 254, nathanejohnson) -- replace uses of wildcard in generated apidocs (issue 251) - -2015-05-25 -- (api break) changed the type of Properties in Model -- (api break) changed the type of Models in ApiDeclaration -- (api break) changed the parameter type of PostBuildDeclarationMapFunc - -2015-04-09 -- add ModelBuildable interface for customization of Model - -2015-03-17 -- preserve order of Routes per WebService in Swagger listing -- fix use of $ref and type in Swagger models -- add api version to listing - -2014-11-14 -- operation parameters are now sorted using ordering path,query,form,header,body - -2014-11-12 -- respect omitempty tag value for embedded structs -- expose ApiVersion of WebService to Swagger ApiDeclaration - -2014-05-29 -- (api add) Ability to define custom http.Handler to serve swagger-ui static files - -2014-05-04 -- (fix) include model for array element type of response - -2014-01-03 -- (fix) do not add primitive type to the Api models - -2013-11-27 -- (fix) make Swagger work for WebServices with root ("/" or "") paths - -2013-10-29 -- (api add) package variable LogInfo to customize logging function - -2013-10-15 -- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE deleted file mode 100644 index aeab5b440e..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2017 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/README.md b/vendor/github.com/emicklei/go-restful-swagger12/README.md deleted file mode 100644 index cad28966a9..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# go-restful-swagger12 - -[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12) - -How to use Swagger UI with go-restful -= - -Get the Swagger UI sources (version 1.2 only) - - git clone https://github.com/wordnik/swagger-ui.git - -The project contains a "dist" folder. -Its contents has all the Swagger UI files you need. - -The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`. -You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json` - -Now, you can install the Swagger WebService for serving the Swagger specification in JSON. - - config := swagger.Config{ - WebServices: restful.RegisteredWebServices(), - ApiPath: "/apidocs.json", - SwaggerPath: "/apidocs/", - SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"} - swagger.InstallSwaggerService(config) - - -Documenting Structs --- - -Currently there are 2 ways to document your structs in the go-restful Swagger. - -###### By using struct tags -- Use tag "description" to annotate a struct field with a description to show in the UI -- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line. - -###### By using the SwaggerDoc method -Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**. - - type Address struct { - Country string `json:"country,omitempty"` - PostCode int `json:"postcode,omitempty"` - } - - func (Address) SwaggerDoc() map[string]string { - return map[string]string{ - "": "Address doc", - "country": "Country doc", - "postcode": "PostCode doc", - } - } - -This example will generate a JSON like this - - { - "Address": { - "id": "Address", - "description": "Address doc", - "properties": { - "country": { - "type": "string", - "description": "Country doc" - }, - "postcode": { - "type": "integer", - "format": "int32", - "description": "PostCode doc" - } - } - } - } - -**Very Important Notes:** -- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address)) -- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`) - -Notes --- -- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..) -- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. 
- -© 2017, ernestmicklei.com. MIT License. Contributions welcome. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go deleted file mode 100644 index 9f4c3690ac..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go +++ /dev/null @@ -1,64 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// ApiDeclarationList maintains an ordered list of ApiDeclaration. -type ApiDeclarationList struct { - List []ApiDeclaration -} - -// At returns the ApiDeclaration by its path unless absent, then ok is false -func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) { - for _, each := range l.List { - if each.ResourcePath == path { - return each, true - } - } - return a, false -} - -// Put adds or replaces a ApiDeclaration with this name -func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) { - // maybe replace existing - for i, each := range l.List { - if each.ResourcePath == path { - // replace - l.List[i] = a - return - } - } - // add - l.List = append(l.List, a) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) { - for _, each := range l.List { - block(each.ResourcePath, each) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ApiDeclarationList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.ResourcePath) - buf.WriteString("\": ") - encoder.Encode(each) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/config.go b/vendor/github.com/emicklei/go-restful-swagger12/config.go deleted file mode 100644 index 18f8e57d90..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package swagger - -import ( - "net/http" - "reflect" - - "github.com/emicklei/go-restful" -) - -// PostBuildDeclarationMapFunc can be used to modify the api declaration map. -type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) - -// MapSchemaFormatFunc can be used to modify typeName at definition time. -type MapSchemaFormatFunc func(typeName string) string - -// MapModelTypeNameFunc can be used to return the desired typeName for a given -// type. It will return false if the default name should be used. -type MapModelTypeNameFunc func(t reflect.Type) (string, bool) - -type Config struct { - // url where the services are available, e.g. http://localhost:8080 - // if left empty then the basePath of Swagger is taken from the actual request - WebServicesUrl string - // path where the JSON api is avaiable , e.g. /apidocs - ApiPath string - // [optional] path where the swagger UI will be served, e.g. /swagger - SwaggerPath string - // [optional] location of folder containing Swagger HTML5 application index.html - SwaggerFilePath string - // api listing is constructed from this list of restful WebServices. 
- WebServices []*restful.WebService - // will serve all static content (scripts,pages,images) - StaticHandler http.Handler - // [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled. - DisableCORS bool - // Top-level API version. Is reflected in the resource listing. - ApiVersion string - // If set then call this handler after building the complete ApiDeclaration Map - PostBuildHandler PostBuildDeclarationMapFunc - // Swagger global info struct - Info Info - // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion. - SchemaFormatHandler MapSchemaFormatFunc - // [optional] If set, model builder should call this handler to retrieve the name for a given type. - ModelTypeNameHandler MapModelTypeNameFunc -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go deleted file mode 100644 index d40786f251..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go +++ /dev/null @@ -1,467 +0,0 @@ -package swagger - -import ( - "encoding/json" - "reflect" - "strings" -) - -// ModelBuildable is used for extending Structs that need more control over -// how the Model appears in the Swagger api declaration. -type ModelBuildable interface { - PostBuildModel(m *Model) *Model -} - -type modelBuilder struct { - Models *ModelList - Config *Config -} - -type documentable interface { - SwaggerDoc() map[string]string -} - -// Check if this structure has a method with signature func () SwaggerDoc() map[string]string -// If it exists, retrive the documentation and overwrite all struct tag descriptions -func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string { - if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok { - return docable.SwaggerDoc() - } - return make(map[string]string) -} - -// addModelFrom creates and adds a Model to the builder and detects and calls -// the post build hook for customizations -func (b modelBuilder) addModelFrom(sample interface{}) { - if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil { - // allow customizations - if buildable, ok := sample.(ModelBuildable); ok { - modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models.Put(modelOrNil.Id, *modelOrNil) - } - } -} - -func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { - // Turn pointers into simpler types so further checks are - // correct. - if st.Kind() == reflect.Ptr { - st = st.Elem() - } - - modelName := b.keyFrom(st) - if nameOverride != "" { - modelName = nameOverride - } - // no models needed for primitive types - if b.isPrimitiveType(modelName) { - return nil - } - // golang encoding/json packages says array and slice values encode as - // JSON arrays, except that []byte encodes as a base64-encoded string. - // If we see a []byte here, treat it at as a primitive type (string) - // and deal with it in buildArrayTypeProperty. 
- if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) && - st.Elem().Kind() == reflect.Uint8 { - return nil - } - // see if we already have visited this model - if _, ok := b.Models.At(modelName); ok { - return nil - } - sm := Model{ - Id: modelName, - Required: []string{}, - Properties: ModelPropertyList{}} - - // reference the model before further initializing (enables recursive structs) - b.Models.Put(modelName, sm) - - // check for slice or array - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - b.addModel(st.Elem(), "") - return &sm - } - // check for structure or primitive type - if st.Kind() != reflect.Struct { - return &sm - } - - fullDoc := getDocFromMethodSwaggerDoc2(st) - modelDescriptions := []string{} - - for i := 0; i < st.NumField(); i++ { - field := st.Field(i) - jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName) - if len(modelDescription) > 0 { - modelDescriptions = append(modelDescriptions, modelDescription) - } - - // add if not omitted - if len(jsonName) != 0 { - // update description - if fieldDoc, ok := fullDoc[jsonName]; ok { - prop.Description = fieldDoc - } - // update Required - if b.isPropertyRequired(field) { - sm.Required = append(sm.Required, jsonName) - } - sm.Properties.Put(jsonName, prop) - } - } - - // We always overwrite documentation if SwaggerDoc method exists - // "" is special for documenting the struct itself - if modelDoc, ok := fullDoc[""]; ok { - sm.Description = modelDoc - } else if len(modelDescriptions) != 0 { - sm.Description = strings.Join(modelDescriptions, "\n") - } - - // update model builder with completed model - b.Models.Put(modelName, sm) - - return &sm -} - -func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool { - required := true - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "omitempty" { - return false - } - } - return required -} - -func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) { - jsonName = b.jsonNameOfField(field) - if len(jsonName) == 0 { - // empty name signals skip property - return "", "", prop - } - - if field.Name == "XMLName" && field.Type.String() == "xml.Name" { - // property is metadata for the xml.Name attribute, can be skipped - return "", "", prop - } - - if tag := field.Tag.Get("modelDescription"); tag != "" { - modelDescription = tag - } - - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, modelDescription, prop - } - fieldType := field.Type - - // check if type is doing its own marshalling - marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem() - if fieldType.Implements(marshalerType) { - var pType = "string" - if prop.Type == nil { - prop.Type = &pType - } - if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) - } - return jsonName, modelDescription, prop - } - - // check if annotation says it is a string - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "string" { - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - } - } - - fieldKind := fieldType.Kind() - switch { - case fieldKind == reflect.Struct: - jsonName, prop := b.buildStructTypeProperty(field, jsonName, model) - return jsonName, modelDescription, prop - case fieldKind == reflect.Slice || fieldKind == reflect.Array: - jsonName, prop := 
b.buildArrayTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.Ptr: - jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.String: - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - case fieldKind == reflect.Map: - // if it's a map, it's unstructured, and swagger 1.2 can't handle it - objectType := "object" - prop.Type = &objectType - return jsonName, modelDescription, prop - } - - fieldTypeName := b.keyFrom(fieldType) - if b.isPrimitiveType(fieldTypeName) { - mapped := b.jsonSchemaType(fieldTypeName) - prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, modelDescription, prop - } - modelType := b.keyFrom(fieldType) - prop.Ref = &modelType - - if fieldType.Name() == "" { // override type of anonymous structs - nestedTypeName := modelName + "." + jsonName - prop.Ref = &nestedTypeName - b.addModel(fieldType, nestedTypeName) - } - return jsonName, modelDescription, prop -} - -func hasNamedJSONTag(field reflect.StructField) bool { - parts := strings.Split(field.Tag.Get("json"), ",") - if len(parts) == 0 { - return false - } - for _, s := range parts[1:] { - if s == "inline" { - return false - } - } - return len(parts[0]) > 0 -} - -func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tag - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - // check for anonymous - if len(fieldType.Name()) == 0 { - // anonymous - anonType := model.Id + "." + jsonName - b.addModel(fieldType, anonType) - prop.Ref = &anonType - return jsonName, prop - } - - if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { - // embedded struct - sub := modelBuilder{new(ModelList), b.Config} - sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) - // merge properties from sub - subModel, _ := sub.Models.At(subKey) - subModel.Properties.Do(func(k string, v ModelProperty) { - model.Properties.Put(k, v) - // if subModel says this property is required then include it - required := false - for _, each := range subModel.Required { - if k == each { - required = true - break - } - } - if required { - model.Required = append(model.Required, k) - } - }) - // add all new referenced models - sub.Models.Do(func(key string, sub Model) { - if key != subKey { - if _, ok := b.Models.At(key); !ok { - b.Models.Put(key, sub) - } - } - }) - // empty name signals skip property - return "", prop - } - // simple struct - b.addModel(fieldType, "") - var pType = b.keyFrom(fieldType) - prop.Ref = &pType - return jsonName, prop -} - -func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - // check for type override in tags - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - if fieldType.Elem().Kind() == reflect.Uint8 { - stringt := "string" - prop.Type = &stringt - return jsonName, prop - } - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Name()) - elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem()) - prop.Items = new(Item) - if isPrimitive { - mapped := b.jsonSchemaType(elemTypeName) - prop.Items.Type = 
&mapped - } else { - prop.Items.Ref = &elemTypeName - } - // add|overwrite model for element type - if fieldType.Elem().Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - } - if !isPrimitive { - b.addModel(fieldType.Elem(), elemTypeName) - } - return jsonName, prop -} - -func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tags - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - - // override type of pointer to list-likes - if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array { - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name()) - elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem()) - if isPrimitive { - primName := b.jsonSchemaType(elemName) - prop.Items = &Item{Ref: &primName} - } else { - prop.Items = &Item{Ref: &elemName} - } - if !isPrimitive { - // add|overwrite model for element type - b.addModel(fieldType.Elem().Elem(), elemName) - } - } else { - // non-array, pointer type - fieldTypeName := b.keyFrom(fieldType.Elem()) - var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path - if b.isPrimitiveType(fieldTypeName) { - prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, prop - } - prop.Ref = &pType - elemName := "" - if fieldType.Elem().Name() == "" { - elemName = modelName + "." + jsonName - prop.Ref = &elemName - } - b.addModel(fieldType.Elem(), elemName) - } - return jsonName, prop -} - -func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Name() == "" { - return modelName + "." 
+ jsonName - } - return b.keyFrom(t) -} - -func (b modelBuilder) keyFrom(st reflect.Type) string { - key := st.String() - if b.Config != nil && b.Config.ModelTypeNameHandler != nil { - if name, ok := b.Config.ModelTypeNameHandler(st); ok { - key = name - } - } - if len(st.Name()) == 0 { // unnamed type - // Swagger UI has special meaning for [ - key = strings.Replace(key, "[]", "||", -1) - } - return key -} - -// see also https://golang.org/ref/spec#Numeric_types -func (b modelBuilder) isPrimitiveType(modelName string) bool { - if len(modelName) == 0 { - return false - } - return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName) -} - -// jsonNameOfField returns the name of the field as it should appear in JSON format -// An empty string indicates that this field is not part of the JSON representation -func (b modelBuilder) jsonNameOfField(field reflect.StructField) string { - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if s[0] == "-" { - // empty name signals skip property - return "" - } else if s[0] != "" { - return s[0] - } - } - return field.Name -} - -// see also http://json-schema.org/latest/json-schema-core.html#anchor8 -func (b modelBuilder) jsonSchemaType(modelName string) string { - schemaMap := map[string]string{ - "uint": "integer", - "uint8": "integer", - "uint16": "integer", - "uint32": "integer", - "uint64": "integer", - - "int": "integer", - "int8": "integer", - "int16": "integer", - "int32": "integer", - "int64": "integer", - - "byte": "integer", - "float64": "number", - "float32": "number", - "bool": "boolean", - "time.Time": "string", - } - mapped, ok := schemaMap[modelName] - if !ok { - return modelName // use as is (custom or struct) - } - return mapped -} - -func (b modelBuilder) jsonSchemaFormat(modelName string) string { - if b.Config != nil && b.Config.SchemaFormatHandler != nil { - if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" { - return mapped - } - } - schemaMap := map[string]string{ - "int": "int32", - "int32": "int32", - "int64": "int64", - "byte": "byte", - "uint": "integer", - "uint8": "byte", - "float64": "double", - "float32": "float", - "time.Time": "date-time", - "*time.Time": "date-time", - } - mapped, ok := schemaMap[modelName] - if !ok { - return "" // no format - } - return mapped -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder_test.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder_test.go deleted file mode 100644 index a2d2f126f7..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_builder_test.go +++ /dev/null @@ -1,1283 +0,0 @@ -package swagger - -import ( - "encoding/xml" - "net" - "reflect" - "testing" - "time" -) - -type YesNo bool - -func (y YesNo) MarshalJSON() ([]byte, error) { - if y { - return []byte("yes"), nil - } - return []byte("no"), nil -} - -// clear && go test -v -test.run TestRef_Issue190 ...swagger -func TestRef_Issue190(t *testing.T) { - type User struct { - items []string - } - testJsonFromStruct(t, User{}, `{ - "swagger.User": { - "id": "swagger.User", - "required": [ - "items" - ], - "properties": { - "items": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }`) -} - -func TestWithoutAdditionalFormat(t *testing.T) { - type mytime struct { - time.Time - } - type usemytime struct { - t mytime - } - testJsonFromStruct(t, usemytime{}, `{ - "swagger.usemytime": { - 
"id": "swagger.usemytime", - "required": [ - "t" - ], - "properties": { - "t": { - "type": "string" - } - } - } - }`) -} - -func TestWithAdditionalFormat(t *testing.T) { - type mytime struct { - time.Time - } - type usemytime struct { - t mytime - } - testJsonFromStructWithConfig(t, usemytime{}, `{ - "swagger.usemytime": { - "id": "swagger.usemytime", - "required": [ - "t" - ], - "properties": { - "t": { - "type": "string", - "format": "date-time" - } - } - } - }`, &Config{ - SchemaFormatHandler: func(typeName string) string { - switch typeName { - case "swagger.mytime": - return "date-time" - } - return "" - }, - }) -} - -// clear && go test -v -test.run TestCustomMarshaller_Issue96 ...swagger -func TestCustomMarshaller_Issue96(t *testing.T) { - type Vote struct { - What YesNo - } - testJsonFromStruct(t, Vote{}, `{ - "swagger.Vote": { - "id": "swagger.Vote", - "required": [ - "What" - ], - "properties": { - "What": { - "type": "string" - } - } - } - }`) -} - -// clear && go test -v -test.run TestPrimitiveTypes ...swagger -func TestPrimitiveTypes(t *testing.T) { - type Prims struct { - f float64 - t time.Time - } - testJsonFromStruct(t, Prims{}, `{ - "swagger.Prims": { - "id": "swagger.Prims", - "required": [ - "f", - "t" - ], - "properties": { - "f": { - "type": "number", - "format": "double" - }, - "t": { - "type": "string", - "format": "date-time" - } - } - } - }`) -} - -// clear && go test -v -test.run TestPrimitivePtrTypes ...swagger -func TestPrimitivePtrTypes(t *testing.T) { - type Prims struct { - f *float64 - t *time.Time - b *bool - s *string - i *int - } - testJsonFromStruct(t, Prims{}, `{ - "swagger.Prims": { - "id": "swagger.Prims", - "required": [ - "f", - "t", - "b", - "s", - "i" - ], - "properties": { - "b": { - "type": "boolean" - }, - "f": { - "type": "number", - "format": "double" - }, - "i": { - "type": "integer", - "format": "int32" - }, - "s": { - "type": "string" - }, - "t": { - "type": "string", - "format": "date-time" - } - } - } - }`) -} - -// clear && go test -v -test.run TestS1 ...swagger -func TestS1(t *testing.T) { - type S1 struct { - Id string - } - testJsonFromStruct(t, S1{}, `{ - "swagger.S1": { - "id": "swagger.S1", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "string" - } - } - } - }`) -} - -// clear && go test -v -test.run TestS2 ...swagger -func TestS2(t *testing.T) { - type S2 struct { - Ids []string - } - testJsonFromStruct(t, S2{}, `{ - "swagger.S2": { - "id": "swagger.S2", - "required": [ - "Ids" - ], - "properties": { - "Ids": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }`) -} - -// clear && go test -v -test.run TestS3 ...swagger -func TestS3(t *testing.T) { - type NestedS3 struct { - Id string - } - type S3 struct { - Nested NestedS3 - } - testJsonFromStruct(t, S3{}, `{ - "swagger.NestedS3": { - "id": "swagger.NestedS3", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "string" - } - } - }, - "swagger.S3": { - "id": "swagger.S3", - "required": [ - "Nested" - ], - "properties": { - "Nested": { - "$ref": "swagger.NestedS3" - } - } - } - }`) -} - -type sample struct { - id string `swagger:"required"` // TODO - items []item - rootItem item `json:"root" description:"root desc"` -} - -type item struct { - itemName string `json:"name"` -} - -// clear && go test -v -test.run TestSampleToModelAsJson ...swagger -func TestSampleToModelAsJson(t *testing.T) { - testJsonFromStruct(t, sample{items: []item{}}, `{ - "swagger.item": { - "id": "swagger.item", - "required": [ - "name" - ], - 
"properties": { - "name": { - "type": "string" - } - } - }, - "swagger.sample": { - "id": "swagger.sample", - "required": [ - "id", - "items", - "root" - ], - "properties": { - "id": { - "type": "string" - }, - "items": { - "type": "array", - "items": { - "$ref": "swagger.item" - } - }, - "root": { - "$ref": "swagger.item", - "description": "root desc" - } - } - } - }`) -} - -func TestJsonTags(t *testing.T) { - type X struct { - A string - B string `json:"-"` - C int `json:",string"` - D int `json:","` - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A", - "C", - "D" - ], - "properties": { - "A": { - "type": "string" - }, - "C": { - "type": "string" - }, - "D": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestJsonTagOmitempty(t *testing.T) { - type X struct { - A int `json:",omitempty"` - B int `json:"C,omitempty"` - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "properties": { - "A": { - "type": "integer", - "format": "int32" - }, - "C": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestJsonTagName(t *testing.T) { - type X struct { - A string `json:"B"` - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "B" - ], - "properties": { - "B": { - "type": "string" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestAnonymousStruct(t *testing.T) { - type X struct { - A struct { - B int - } - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A" - ], - "properties": { - "A": { - "$ref": "swagger.X.A" - } - } - }, - "swagger.X.A": { - "id": "swagger.X.A", - "required": [ - "B" - ], - "properties": { - "B": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestAnonymousPtrStruct(t *testing.T) { - type X struct { - A *struct { - B int - } - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A" - ], - "properties": { - "A": { - "$ref": "swagger.X.A" - } - } - }, - "swagger.X.A": { - "id": "swagger.X.A", - "required": [ - "B" - ], - "properties": { - "B": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestAnonymousArrayStruct(t *testing.T) { - type X struct { - A []struct { - B int - } - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A" - ], - "properties": { - "A": { - "type": "array", - "items": { - "$ref": "swagger.X.A" - } - } - } - }, - "swagger.X.A": { - "id": "swagger.X.A", - "required": [ - "B" - ], - "properties": { - "B": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -func TestAnonymousPtrArrayStruct(t *testing.T) { - type X struct { - A *[]struct { - B int - } - } - - expected := `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A" - ], - "properties": { - "A": { - "type": "array", - "items": { - "$ref": "swagger.X.A" - } - } - } - }, - "swagger.X.A": { - "id": "swagger.X.A", - "required": [ - "B" - ], - "properties": { - "B": { - "type": "integer", - "format": "int32" - } - } - } - }` - - testJsonFromStruct(t, X{}, expected) -} - -// go test -v -test.run TestEmbeddedStruct_Issue98 ...swagger -func TestEmbeddedStruct_Issue98(t *testing.T) { - type Y struct { - A int - } - type X struct { - Y - } - testJsonFromStruct(t, X{}, `{ - "swagger.X": { - "id": "swagger.X", - "required": [ - "A" - ], 
- "properties": { - "A": { - "type": "integer", - "format": "int32" - } - } - } - }`) -} - -type Dataset struct { - Names []string -} - -// clear && go test -v -test.run TestIssue85 ...swagger -func TestIssue85(t *testing.T) { - anon := struct{ Datasets []Dataset }{} - testJsonFromStruct(t, anon, `{ - "struct { Datasets ||swagger.Dataset }": { - "id": "struct { Datasets ||swagger.Dataset }", - "required": [ - "Datasets" - ], - "properties": { - "Datasets": { - "type": "array", - "items": { - "$ref": "swagger.Dataset" - } - } - } - }, - "swagger.Dataset": { - "id": "swagger.Dataset", - "required": [ - "Names" - ], - "properties": { - "Names": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }`) -} - -type File struct { - History []File - HistoryPtrs []*File -} - -// go test -v -test.run TestRecursiveStructure ...swagger -func TestRecursiveStructure(t *testing.T) { - testJsonFromStruct(t, File{}, `{ - "swagger.File": { - "id": "swagger.File", - "required": [ - "History", - "HistoryPtrs" - ], - "properties": { - "History": { - "type": "array", - "items": { - "$ref": "swagger.File" - } - }, - "HistoryPtrs": { - "type": "array", - "items": { - "$ref": "swagger.File" - } - } - } - } - }`) -} - -type A1 struct { - B struct { - Id int - Comment string `json:"comment,omitempty"` - } -} - -// go test -v -test.run TestEmbeddedStructA1 ...swagger -func TestEmbeddedStructA1(t *testing.T) { - testJsonFromStruct(t, A1{}, `{ - "swagger.A1": { - "id": "swagger.A1", - "required": [ - "B" - ], - "properties": { - "B": { - "$ref": "swagger.A1.B" - } - } - }, - "swagger.A1.B": { - "id": "swagger.A1.B", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "integer", - "format": "int32" - }, - "comment": { - "type": "string" - } - } - } - }`) -} - -type A2 struct { - C -} -type C struct { - Id int `json:"B"` - Comment string `json:"comment,omitempty"` - Secure bool `json:"secure"` -} - -// go test -v -test.run TestEmbeddedStructA2 ...swagger -func TestEmbeddedStructA2(t *testing.T) { - testJsonFromStruct(t, A2{}, `{ - "swagger.A2": { - "id": "swagger.A2", - "required": [ - "B", - "secure" - ], - "properties": { - "B": { - "type": "integer", - "format": "int32" - }, - "comment": { - "type": "string" - }, - "secure": { - "type": "boolean" - } - } - } - }`) -} - -type A3 struct { - B D -} - -type D struct { - Id int -} - -// clear && go test -v -test.run TestStructA3 ...swagger -func TestStructA3(t *testing.T) { - testJsonFromStruct(t, A3{}, `{ - "swagger.A3": { - "id": "swagger.A3", - "required": [ - "B" - ], - "properties": { - "B": { - "$ref": "swagger.D" - } - } - }, - "swagger.D": { - "id": "swagger.D", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "integer", - "format": "int32" - } - } - } - }`) -} - -type A4 struct { - D "json:,inline" -} - -// clear && go test -v -test.run TestStructA4 ...swagger -func TestEmbeddedStructA4(t *testing.T) { - testJsonFromStruct(t, A4{}, `{ - "swagger.A4": { - "id": "swagger.A4", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "integer", - "format": "int32" - } - } - } - }`) -} - -type A5 struct { - D `json:"d"` -} - -// clear && go test -v -test.run TestStructA5 ...swagger -func TestEmbeddedStructA5(t *testing.T) { - testJsonFromStruct(t, A5{}, `{ - "swagger.A5": { - "id": "swagger.A5", - "required": [ - "d" - ], - "properties": { - "d": { - "$ref": "swagger.D" - } - } - }, - "swagger.D": { - "id": "swagger.D", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "integer", - "format": 
"int32" - } - } - } - }`) -} - -type D2 struct { - id int - D []D -} - -type A6 struct { - D2 "json:,inline" -} - -// clear && go test -v -test.run TestStructA4 ...swagger -func TestEmbeddedStructA6(t *testing.T) { - testJsonFromStruct(t, A6{}, `{ - "swagger.A6": { - "id": "swagger.A6", - "required": [ - "id", - "D" - ], - "properties": { - "D": { - "type": "array", - "items": { - "$ref": "swagger.D" - } - }, - "id": { - "type": "integer", - "format": "int32" - } - } - }, - "swagger.D": { - "id": "swagger.D", - "required": [ - "Id" - ], - "properties": { - "Id": { - "type": "integer", - "format": "int32" - } - } - } - }`) -} - -type ObjectId []byte - -type Region struct { - Id ObjectId `bson:"_id" json:"id"` - Name string `bson:"name" json:"name"` - Type string `bson:"type" json:"type"` -} - -// clear && go test -v -test.run TestRegion_Issue113 ...swagger -func TestRegion_Issue113(t *testing.T) { - testJsonFromStruct(t, []Region{}, `{ - "||swagger.Region": { - "id": "||swagger.Region", - "properties": {} - }, - "swagger.Region": { - "id": "swagger.Region", - "required": [ - "id", - "name", - "type" - ], - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "type": { - "type": "string" - } - } - } - }`) -} - -// clear && go test -v -test.run TestIssue158 ...swagger -func TestIssue158(t *testing.T) { - type Address struct { - Country string `json:"country,omitempty"` - } - - type Customer struct { - Name string `json:"name"` - Address Address `json:"address"` - } - expected := `{ - "swagger.Address": { - "id": "swagger.Address", - "properties": { - "country": { - "type": "string" - } - } - }, - "swagger.Customer": { - "id": "swagger.Customer", - "required": [ - "name", - "address" - ], - "properties": { - "address": { - "$ref": "swagger.Address" - }, - "name": { - "type": "string" - } - } - } - }` - testJsonFromStruct(t, Customer{}, expected) -} - -func TestPointers(t *testing.T) { - type Vote struct { - What YesNo - } - testJsonFromStruct(t, &Vote{}, `{ - "swagger.Vote": { - "id": "swagger.Vote", - "required": [ - "What" - ], - "properties": { - "What": { - "type": "string" - } - } - } - }`) -} - -func TestSlices(t *testing.T) { - type Address struct { - Country string `json:"country,omitempty"` - } - expected := `{ - "swagger.Address": { - "id": "swagger.Address", - "properties": { - "country": { - "type": "string" - } - } - }, - "swagger.Customer": { - "id": "swagger.Customer", - "required": [ - "name", - "addresses" - ], - "properties": { - "addresses": { - "type": "array", - "items": { - "$ref": "swagger.Address" - } - }, - "name": { - "type": "string" - } - } - } - }` - // both slices (with pointer value and with type value) should have equal swagger representation - { - type Customer struct { - Name string `json:"name"` - Addresses []Address `json:"addresses"` - } - testJsonFromStruct(t, Customer{}, expected) - } - { - type Customer struct { - Name string `json:"name"` - Addresses []*Address `json:"addresses"` - } - testJsonFromStruct(t, Customer{}, expected) - } - -} - -type Name struct { - Value string -} - -func (n Name) PostBuildModel(m *Model) *Model { - m.Description = "titles must be upcase" - return m -} - -type TOC struct { - Titles []Name -} - -type Discography struct { - Title Name - TOC -} - -// clear && go test -v -test.run TestEmbeddedStructPull204 ...swagger -func TestEmbeddedStructPull204(t *testing.T) { - b := Discography{} - testJsonFromStruct(t, b, ` -{ - "swagger.Discography": { - "id": "swagger.Discography", - "required": [ - 
"Title", - "Titles" - ], - "properties": { - "Title": { - "$ref": "swagger.Name" - }, - "Titles": { - "type": "array", - "items": { - "$ref": "swagger.Name" - } - } - } - }, - "swagger.Name": { - "id": "swagger.Name", - "required": [ - "Value" - ], - "properties": { - "Value": { - "type": "string" - } - } - } - } -`) -} - -type AddressWithMethod struct { - Country string `json:"country,omitempty"` - PostCode int `json:"postcode,omitempty"` -} - -func (AddressWithMethod) SwaggerDoc() map[string]string { - return map[string]string{ - "": "Address doc", - "country": "Country doc", - "postcode": "PostCode doc", - } -} - -func TestDocInMethodSwaggerDoc(t *testing.T) { - expected := `{ - "swagger.AddressWithMethod": { - "id": "swagger.AddressWithMethod", - "description": "Address doc", - "properties": { - "country": { - "type": "string", - "description": "Country doc" - }, - "postcode": { - "type": "integer", - "format": "int32", - "description": "PostCode doc" - } - } - } - }` - testJsonFromStruct(t, AddressWithMethod{}, expected) -} - -type RefDesc struct { - f1 *int64 `description:"desc"` -} - -func TestPtrDescription(t *testing.T) { - b := RefDesc{} - expected := `{ - "swagger.RefDesc": { - "id": "swagger.RefDesc", - "required": [ - "f1" - ], - "properties": { - "f1": { - "type": "integer", - "format": "int64", - "description": "desc" - } - } - } - }` - testJsonFromStruct(t, b, expected) -} - -type A struct { - B `json:",inline"` - C1 `json:"metadata,omitempty"` -} - -type B struct { - SB string -} - -type C1 struct { - SC string -} - -func (A) SwaggerDoc() map[string]string { - return map[string]string{ - "": "A struct", - "B": "B field", // We should not get anything from this - "metadata": "C1 field", - } -} - -func (B) SwaggerDoc() map[string]string { - return map[string]string{ - "": "B struct", - "SB": "SB field", - } -} - -func (C1) SwaggerDoc() map[string]string { - return map[string]string{ - "": "C1 struct", - "SC": "SC field", - } -} - -func TestNestedStructDescription(t *testing.T) { - expected := ` -{ - "swagger.A": { - "id": "swagger.A", - "description": "A struct", - "required": [ - "SB" - ], - "properties": { - "SB": { - "type": "string", - "description": "SB field" - }, - "metadata": { - "$ref": "swagger.C1", - "description": "C1 field" - } - } - }, - "swagger.C1": { - "id": "swagger.C1", - "description": "C1 struct", - "required": [ - "SC" - ], - "properties": { - "SC": { - "type": "string", - "description": "SC field" - } - } - } - } -` - testJsonFromStruct(t, A{}, expected) -} - -// This tests a primitive with type overrides in the struct tags -type FakeInt int -type E struct { - Id FakeInt `type:"integer"` - IP net.IP `type:"string"` -} - -func TestOverridenTypeTagE1(t *testing.T) { - expected := ` -{ - "swagger.E": { - "id": "swagger.E", - "required": [ - "Id", - "IP" - ], - "properties": { - "Id": { - "type": "integer" - }, - "IP": { - "type": "string" - } - } - } - } -` - testJsonFromStruct(t, E{}, expected) -} - -type XmlNamed struct { - XMLName xml.Name `xml:"user"` - Id string `json:"id" xml:"id"` - Name string `json:"name" xml:"name"` -} - -func TestXmlNameStructs(t *testing.T) { - expected := ` -{ - "swagger.XmlNamed": { - "id": "swagger.XmlNamed", - "required": [ - "id", - "name" - ], - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - } - } - } - } -` - testJsonFromStruct(t, XmlNamed{}, expected) -} - -func TestNameCustomization(t *testing.T) { - expected := ` -{ - "swagger.A": { - "id": "swagger.A", - "description": "A struct", 
- "required": [ - "SB" - ], - "properties": { - "SB": { - "type": "string", - "description": "SB field" - }, - "metadata": { - "$ref": "new.swagger.SpecialC1", - "description": "C1 field" - } - } - }, - "new.swagger.SpecialC1": { - "id": "new.swagger.SpecialC1", - "description": "C1 struct", - "required": [ - "SC" - ], - "properties": { - "SC": { - "type": "string", - "description": "SC field" - } - } - } - }` - - testJsonFromStructWithConfig(t, A{}, expected, &Config{ - ModelTypeNameHandler: func(t reflect.Type) (string, bool) { - if t == reflect.TypeOf(C1{}) { - return "new.swagger.SpecialC1", true - } - return "", false - }, - }) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go deleted file mode 100644 index 9bb6cb6785..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// NamedModel associates a name with a Model (not using its Id) -type NamedModel struct { - Name string - Model Model -} - -// ModelList encapsulates a list of NamedModel (association) -type ModelList struct { - List []NamedModel -} - -// Put adds or replaces a Model by its name -func (l *ModelList) Put(name string, model Model) { - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModel{name, model} - return - } - } - // add - l.List = append(l.List, NamedModel{name, model}) -} - -// At returns a Model by its name, ok is false if absent -func (l *ModelList) At(name string) (m Model, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Model, true - } - } - return m, false -} - -// Do enumerates all the models, each with its assigned name -func (l *ModelList) Do(block func(name string, value Model)) { - for _, each := range l.List { - block(each.Name, each.Model) - } -} - -// MarshalJSON writes the ModelList as if it was a map[string]Model -func (l ModelList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Model) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelList. This is an expensive operation. 
-func (l *ModelList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m Model - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list_test.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list_test.go deleted file mode 100644 index 9a9ab919b4..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_list_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package swagger - -import ( - "encoding/json" - "testing" -) - -func TestModelList(t *testing.T) { - m := Model{} - m.Id = "m" - l := ModelList{} - l.Put("m", m) - k, ok := l.At("m") - if !ok { - t.Error("want model back") - } - if got, want := k.Id, "m"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestModelList_Marshal(t *testing.T) { - l := ModelList{} - m := Model{Id: "myid"} - l.Put("myid", m) - data, err := json.Marshal(l) - if err != nil { - t.Error(err) - } - if got, want := string(data), `{"myid":{"id":"myid","properties":{}}}`; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestModelList_Unmarshal(t *testing.T) { - data := `{"myid":{"id":"myid","properties":{}}}` - l := ModelList{} - if err := json.Unmarshal([]byte(data), &l); err != nil { - t.Error(err) - } - m, ok := l.At("myid") - if !ok { - t.Error("expected myid") - } - if got, want := m.Id, "myid"; got != want { - t.Errorf("got %v want %v", got, want) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go deleted file mode 100644 index a433b6b702..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go +++ /dev/null @@ -1,81 +0,0 @@ -package swagger - -import ( - "reflect" - "strings" -) - -func (prop *ModelProperty) setDescription(field reflect.StructField) { - if tag := field.Tag.Get("description"); tag != "" { - prop.Description = tag - } -} - -func (prop *ModelProperty) setDefaultValue(field reflect.StructField) { - if tag := field.Tag.Get("default"); tag != "" { - prop.DefaultValue = Special(tag) - } -} - -func (prop *ModelProperty) setEnumValues(field reflect.StructField) { - // We use | to separate the enum values. This value is chosen - // since its unlikely to be useful in actual enumeration values. - if tag := field.Tag.Get("enum"); tag != "" { - prop.Enum = strings.Split(tag, "|") - } -} - -func (prop *ModelProperty) setMaximum(field reflect.StructField) { - if tag := field.Tag.Get("maximum"); tag != "" { - prop.Maximum = tag - } -} - -func (prop *ModelProperty) setType(field reflect.StructField) { - if tag := field.Tag.Get("type"); tag != "" { - // Check if the first two characters of the type tag are - // intended to emulate slice/array behaviour. 
- // - // If type is intended to be a slice/array then add the - // overriden type to the array item instead of the main property - if len(tag) > 2 && tag[0:2] == "[]" { - pType := "array" - prop.Type = &pType - prop.Items = new(Item) - - iType := tag[2:] - prop.Items.Type = &iType - return - } - - prop.Type = &tag - } -} - -func (prop *ModelProperty) setMinimum(field reflect.StructField) { - if tag := field.Tag.Get("minimum"); tag != "" { - prop.Minimum = tag - } -} - -func (prop *ModelProperty) setUniqueItems(field reflect.StructField) { - tag := field.Tag.Get("unique") - switch tag { - case "true": - v := true - prop.UniqueItems = &v - case "false": - v := false - prop.UniqueItems = &v - } -} - -func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) { - prop.setDescription(field) - prop.setEnumValues(field) - prop.setMinimum(field) - prop.setMaximum(field) - prop.setUniqueItems(field) - prop.setDefaultValue(field) - prop.setType(field) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext_test.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext_test.go deleted file mode 100644 index 1123ed9923..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package swagger - -import ( - "net" - "testing" -) - -// clear && go test -v -test.run TestThatExtraTagsAreReadIntoModel ...swagger -func TestThatExtraTagsAreReadIntoModel(t *testing.T) { - type fakeint int - type fakearray string - type Anything struct { - Name string `description:"name" modelDescription:"a test"` - Size int `minimum:"0" maximum:"10"` - Stati string `enum:"off|on" default:"on" modelDescription:"more description"` - ID string `unique:"true"` - FakeInt fakeint `type:"integer"` - FakeArray fakearray `type:"[]string"` - IP net.IP `type:"string"` - Password string - } - m := modelsFromStruct(Anything{}) - props, _ := m.At("swagger.Anything") - p1, _ := props.Properties.At("Name") - if got, want := p1.Description, "name"; got != want { - t.Errorf("got %v want %v", got, want) - } - p2, _ := props.Properties.At("Size") - if got, want := p2.Minimum, "0"; got != want { - t.Errorf("got %v want %v", got, want) - } - if got, want := p2.Maximum, "10"; got != want { - t.Errorf("got %v want %v", got, want) - } - p3, _ := props.Properties.At("Stati") - if got, want := p3.Enum[0], "off"; got != want { - t.Errorf("got %v want %v", got, want) - } - if got, want := p3.Enum[1], "on"; got != want { - t.Errorf("got %v want %v", got, want) - } - p4, _ := props.Properties.At("ID") - if got, want := *p4.UniqueItems, true; got != want { - t.Errorf("got %v want %v", got, want) - } - p5, _ := props.Properties.At("Password") - if got, want := *p5.Type, "string"; got != want { - t.Errorf("got %v want %v", got, want) - } - p6, _ := props.Properties.At("FakeInt") - if got, want := *p6.Type, "integer"; got != want { - t.Errorf("got %v want %v", got, want) - } - p7, _ := props.Properties.At("FakeArray") - if got, want := *p7.Type, "array"; got != want { - t.Errorf("got %v want %v", got, want) - } - p7p, _ := props.Properties.At("FakeArray") - if got, want := *p7p.Items.Type, "string"; got != want { - t.Errorf("got %v want %v", got, want) - } - p8, _ := props.Properties.At("IP") - if got, want := *p8.Type, "string"; got != want { - t.Errorf("got %v want %v", got, want) - } - - if got, want := props.Description, "a test\nmore description"; got != want { - t.Errorf("got %v want %v", got, want) - } -} diff 
--git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
deleted file mode 100644
index 3babb19448..0000000000
--- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package swagger
-
-// Copyright 2015 Ernest Micklei. All rights reserved.
-// Use of this source code is governed by a license
-// that can be found in the LICENSE file.
-
-import (
-	"bytes"
-	"encoding/json"
-)
-
-// NamedModelProperty associates a name to a ModelProperty
-type NamedModelProperty struct {
-	Name     string
-	Property ModelProperty
-}
-
-// ModelPropertyList encapsulates a list of NamedModelProperty (association)
-type ModelPropertyList struct {
-	List []NamedModelProperty
-}
-
-// At returns the ModelProperty by its name; ok is false if absent
-func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
-	for _, each := range l.List {
-		if each.Name == name {
-			return each.Property, true
-		}
-	}
-	return p, false
-}
-
-// Put adds or replaces a ModelProperty with this name
-func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
-	// maybe replace existing
-	for i, each := range l.List {
-		if each.Name == name {
-			// replace
-			l.List[i] = NamedModelProperty{Name: name, Property: prop}
-			return
-		}
-	}
-	// add
-	l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
-}
-
-// Do enumerates all the properties, each with its assigned name
-func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
-	for _, each := range l.List {
-		block(each.Name, each.Property)
-	}
-}
-
-// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
-func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
-	var buf bytes.Buffer
-	encoder := json.NewEncoder(&buf)
-	buf.WriteString("{\n")
-	for i, each := range l.List {
-		buf.WriteString("\"")
-		buf.WriteString(each.Name)
-		buf.WriteString("\": ")
-		encoder.Encode(each.Property)
-		if i < len(l.List)-1 {
-			buf.WriteString(",\n")
-		}
-	}
-	buf.WriteString("}")
-	return buf.Bytes(), nil
-}
-
-// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
-func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m ModelProperty - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list_test.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list_test.go deleted file mode 100644 index 2833ad8fda..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package swagger - -import ( - "encoding/json" - "testing" -) - -func TestModelPropertyList(t *testing.T) { - l := ModelPropertyList{} - p := ModelProperty{Description: "d"} - l.Put("p", p) - q, ok := l.At("p") - if !ok { - t.Error("expected p") - } - if got, want := q.Description, "d"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestModelPropertyList_Marshal(t *testing.T) { - l := ModelPropertyList{} - p := ModelProperty{Description: "d"} - l.Put("p", p) - data, err := json.Marshal(l) - if err != nil { - t.Error(err) - } - if got, want := string(data), `{"p":{"description":"d"}}`; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestModelPropertyList_Unmarshal(t *testing.T) { - data := `{"p":{"description":"d"}}` - l := ModelPropertyList{} - if err := json.Unmarshal([]byte(data), &l); err != nil { - t.Error(err) - } - m, ok := l.At("p") - if !ok { - t.Error("expected p") - } - if got, want := m.Description, "d"; got != want { - t.Errorf("got %v want %v", got, want) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go deleted file mode 100644 index b33ccfbeb9..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go +++ /dev/null @@ -1,36 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import "github.com/emicklei/go-restful" - -type orderedRouteMap struct { - elements map[string][]restful.Route - keys []string -} - -func newOrderedRouteMap() *orderedRouteMap { - return &orderedRouteMap{ - elements: map[string][]restful.Route{}, - keys: []string{}, - } -} - -func (o *orderedRouteMap) Add(key string, route restful.Route) { - routes, ok := o.elements[key] - if ok { - routes = append(routes, route) - o.elements[key] = routes - return - } - o.elements[key] = []restful.Route{route} - o.keys = append(o.keys, key) -} - -func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) { - for _, k := range o.keys { - block(k, o.elements[k]) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map_test.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map_test.go deleted file mode 100644 index 964e7da05a..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package swagger - -import ( - "testing" - - "github.com/emicklei/go-restful" -) - -// go test -v -test.run TestOrderedRouteMap ...swagger -func TestOrderedRouteMap(t *testing.T) { - m := newOrderedRouteMap() - r1 := restful.Route{Path: "/r1"} - r2 := restful.Route{Path: "/r2"} - m.Add("a", r1) - m.Add("b", r2) - m.Add("b", r1) - m.Add("d", r2) - m.Add("c", r2) - order := "" - m.Do(func(k string, routes []restful.Route) { - order += k - if len(routes) == 0 { - t.Fail() - } - }) - if order != "abdc" { - t.Fail() - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/postbuild_model_test.go b/vendor/github.com/emicklei/go-restful-swagger12/postbuild_model_test.go deleted file mode 100644 index 3e20d2f5b9..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/postbuild_model_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package swagger - -import "testing" - -type Boat struct { - Length int `json:"-"` // on default, this makes the fields not required - Weight int `json:"-"` -} - -// PostBuildModel is from swagger.ModelBuildable -func (b Boat) PostBuildModel(m *Model) *Model { - // override required - m.Required = []string{"Length", "Weight"} - - // add model property (just to test is can be added; is this a real usecase?) 
- extraType := "string" - m.Properties.Put("extra", ModelProperty{ - Description: "extra description", - DataTypeFields: DataTypeFields{ - Type: &extraType, - }, - }) - return m -} - -func TestCustomPostModelBuilde(t *testing.T) { - testJsonFromStruct(t, Boat{}, `{ - "swagger.Boat": { - "id": "swagger.Boat", - "required": [ - "Length", - "Weight" - ], - "properties": { - "extra": { - "type": "string", - "description": "extra description" - } - } - } -}`) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go deleted file mode 100644 index 9c40833e7d..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package swagger implements the structures of the Swagger -// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md -package swagger - -const swaggerVersion = "1.2" - -// 4.3.3 Data Type Fields -type DataTypeFields struct { - Type *string `json:"type,omitempty"` // if Ref not used - Ref *string `json:"$ref,omitempty"` // if Type not used - Format string `json:"format,omitempty"` - DefaultValue Special `json:"defaultValue,omitempty"` - Enum []string `json:"enum,omitempty"` - Minimum string `json:"minimum,omitempty"` - Maximum string `json:"maximum,omitempty"` - Items *Item `json:"items,omitempty"` - UniqueItems *bool `json:"uniqueItems,omitempty"` -} - -type Special string - -// 4.3.4 Items Object -type Item struct { - Type *string `json:"type,omitempty"` - Ref *string `json:"$ref,omitempty"` - Format string `json:"format,omitempty"` -} - -// 5.1 Resource Listing -type ResourceListing struct { - SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 - Apis []Resource `json:"apis"` - ApiVersion string `json:"apiVersion"` - Info Info `json:"info"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.1.2 Resource Object -type Resource struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` -} - -// 5.1.3 Info Object -type Info struct { - Title string `json:"title"` - Description string `json:"description"` - TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"` - Contact string `json:"contact,omitempty"` - License string `json:"license,omitempty"` - LicenseUrl string `json:"licenseUrl,omitempty"` -} - -// 5.1.5 -type Authorization struct { - Type string `json:"type"` - PassAs string `json:"passAs"` - Keyname string `json:"keyname"` - Scopes []Scope `json:"scopes"` - GrantTypes []GrantType `json:"grandTypes"` -} - -// 5.1.6, 5.2.11 -type Scope struct { - // Required. The name of the scope. - Scope string `json:"scope"` - // Recommended. A short description of the scope. - Description string `json:"description"` -} - -// 5.1.7 -type GrantType struct { - Implicit Implicit `json:"implicit"` - AuthorizationCode AuthorizationCode `json:"authorization_code"` -} - -// 5.1.8 Implicit Object -type Implicit struct { - // Required. The login endpoint definition. - loginEndpoint LoginEndpoint `json:"loginEndpoint"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.1.9 Authorization Code Object -type AuthorizationCode struct { - TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"` - TokenEndpoint TokenEndpoint `json:"tokenEndpoint"` -} - -// 5.1.10 Login Endpoint Object -type LoginEndpoint struct { - // Required. 
The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` -} - -// 5.1.11 Token Request Endpoint Object -type TokenRequestEndpoint struct { - // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "client_id" OAuth2 parameter. - ClientIdName string `json:"clientIdName"` - // An optional alternative name to the standard "client_secret" OAuth2 parameter. - ClientSecretName string `json:"clientSecretName"` -} - -// 5.1.12 Token Endpoint Object -type TokenEndpoint struct { - // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.2 API Declaration -type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Info Info `json:"info"` - Apis []Api `json:"apis,omitempty"` - Models ModelList `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.2.2 API Object -type Api struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` - Operations []Operation `json:"operations,omitempty"` -} - -// 5.2.3 Operation Object -type Operation struct { - DataTypeFields - Method string `json:"method"` - Summary string `json:"summary,omitempty"` - Notes string `json:"notes,omitempty"` - Nickname string `json:"nickname"` - Authorizations []Authorization `json:"authorizations,omitempty"` - Parameters []Parameter `json:"parameters"` - ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Deprecated string `json:"deprecated,omitempty"` -} - -// 5.2.4 Parameter Object -type Parameter struct { - DataTypeFields - ParamType string `json:"paramType"` // path,query,body,header,form - Name string `json:"name"` - Description string `json:"description"` - Required bool `json:"required"` - AllowMultiple bool `json:"allowMultiple"` -} - -// 5.2.5 Response Message Object -type ResponseMessage struct { - Code int `json:"code"` - Message string `json:"message"` - ResponseModel string `json:"responseModel,omitempty"` -} - -// 5.2.6, 5.2.7 Models Object -type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties ModelPropertyList `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` -} - -// 5.2.8 Properties Object -type ModelProperty struct { - DataTypeFields - Description string `json:"description,omitempty"` -} - -// 5.2.10 -type Authorizations map[string]Authorization diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go deleted file mode 100644 index 05a3c7e76f..0000000000 --- 
a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go +++ /dev/null @@ -1,21 +0,0 @@ -package swagger - -type SwaggerBuilder struct { - SwaggerService -} - -func NewSwaggerBuilder(config Config) *SwaggerBuilder { - return &SwaggerBuilder{*newSwaggerService(config)} -} - -func (sb SwaggerBuilder) ProduceListing() ResourceListing { - return sb.SwaggerService.produceListing() -} - -func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration { - return sb.SwaggerService.produceAllDeclarations() -} - -func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) { - return sb.SwaggerService.produceDeclarations(route) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_test.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_test.go deleted file mode 100644 index 3db904afc9..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_test.go +++ /dev/null @@ -1,318 +0,0 @@ -package swagger - -import ( - "encoding/json" - "testing" - - "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-swagger12/test_package" -) - -func TestInfoStruct_Issue231(t *testing.T) { - config := Config{ - Info: Info{ - Title: "Title", - Description: "Description", - TermsOfServiceUrl: "http://example.com", - Contact: "example@example.com", - License: "License", - LicenseUrl: "http://example.com/license.txt", - }, - } - sws := newSwaggerService(config) - str, err := json.MarshalIndent(sws.produceListing(), "", " ") - if err != nil { - t.Fatal(err) - } - compareJson(t, string(str), ` - { - "apiVersion": "", - "swaggerVersion": "1.2", - "apis": null, - "info": { - "title": "Title", - "description": "Description", - "termsOfServiceUrl": "http://example.com", - "contact": "example@example.com", - "license": "License", - "licenseUrl": "http://example.com/license.txt" - } - } - `) -} - -// go test -v -test.run TestThatMultiplePathsOnRootAreHandled ...swagger -func TestThatMultiplePathsOnRootAreHandled(t *testing.T) { - ws1 := new(restful.WebService) - ws1.Route(ws1.GET("/_ping").To(dummy)) - ws1.Route(ws1.GET("/version").To(dummy)) - - cfg := Config{ - WebServicesUrl: "http://here.com", - ApiPath: "/apipath", - WebServices: []*restful.WebService{ws1}, - } - sws := newSwaggerService(cfg) - decl := sws.composeDeclaration(ws1, "/") - if got, want := len(decl.Apis), 2; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestWriteSamples(t *testing.T) { - ws1 := new(restful.WebService) - ws1.Route(ws1.GET("/object").To(dummy).Writes(test_package.TestStruct{})) - ws1.Route(ws1.GET("/array").To(dummy).Writes([]test_package.TestStruct{})) - ws1.Route(ws1.GET("/object_and_array").To(dummy).Writes(struct{ Abc test_package.TestStruct }{})) - - cfg := Config{ - WebServicesUrl: "http://here.com", - ApiPath: "/apipath", - WebServices: []*restful.WebService{ws1}, - } - sws := newSwaggerService(cfg) - - decl := sws.composeDeclaration(ws1, "/") - - str, err := json.MarshalIndent(decl.Apis, "", " ") - if err != nil { - t.Fatal(err) - } - - compareJson(t, string(str), ` - [ - { - "path": "/object", - "description": "", - "operations": [ - { - "type": "test_package.TestStruct", - "method": "GET", - "nickname": "dummy", - "parameters": [] - } - ] - }, - { - "path": "/array", - "description": "", - "operations": [ - { - "type": "array", - "items": { - "$ref": "test_package.TestStruct" - }, - "method": "GET", - "nickname": "dummy", - "parameters": [] - } - ] - }, - { - "path": 
"/object_and_array", - "description": "", - "operations": [ - { - "type": "struct { Abc test_package.TestStruct }", - "method": "GET", - "nickname": "dummy", - "parameters": [] - } - ] - } - ]`) - - str, err = json.MarshalIndent(decl.Models, "", " ") - if err != nil { - t.Fatal(err) - } - compareJson(t, string(str), ` - { - "test_package.TestStruct": { - "id": "test_package.TestStruct", - "required": [ - "TestField" - ], - "properties": { - "TestField": { - "type": "string" - } - } - }, - "||test_package.TestStruct": { - "id": "||test_package.TestStruct", - "properties": {} - }, - "struct { Abc test_package.TestStruct }": { - "id": "struct { Abc test_package.TestStruct }", - "required": [ - "Abc" - ], - "properties": { - "Abc": { - "$ref": "test_package.TestStruct" - } - } - } - }`) -} - -func TestRoutesWithCommonPart(t *testing.T) { - ws1 := new(restful.WebService) - ws1.Path("/") - ws1.Route(ws1.GET("/foobar").To(dummy).Writes(test_package.TestStruct{})) - ws1.Route(ws1.HEAD("/foobar").To(dummy).Writes(test_package.TestStruct{})) - ws1.Route(ws1.GET("/foo").To(dummy).Writes([]test_package.TestStruct{})) - ws1.Route(ws1.HEAD("/foo").To(dummy).Writes(test_package.TestStruct{})) - - cfg := Config{ - WebServicesUrl: "http://here.com", - ApiPath: "/apipath", - WebServices: []*restful.WebService{ws1}, - } - sws := newSwaggerService(cfg) - - decl := sws.composeDeclaration(ws1, "/foo") - - str, err := json.MarshalIndent(decl.Apis, "", " ") - if err != nil { - t.Fatal(err) - } - - compareJson(t, string(str), `[ - { - "path": "/foo", - "description": "", - "operations": [ - { - "type": "array", - "items": { - "$ref": "test_package.TestStruct" - }, - "method": "GET", - "nickname": "dummy", - "parameters": [] - }, - { - "type": "test_package.TestStruct", - "method": "HEAD", - "nickname": "dummy", - "parameters": [] - } - ] - } - ]`) -} - -// go test -v -test.run TestServiceToApi ...swagger -func TestServiceToApi(t *testing.T) { - ws := new(restful.WebService) - ws.Path("/tests") - ws.Consumes(restful.MIME_JSON) - ws.Produces(restful.MIME_XML) - ws.Route(ws.GET("/a").To(dummy).Writes(sample{})) - ws.Route(ws.PUT("/b").To(dummy).Writes(sample{})) - ws.Route(ws.POST("/c").To(dummy).Writes(sample{})) - ws.Route(ws.DELETE("/d").To(dummy).Writes(sample{})) - - ws.Route(ws.GET("/d").To(dummy).Writes(sample{})) - ws.Route(ws.PUT("/c").To(dummy).Writes(sample{})) - ws.Route(ws.POST("/b").To(dummy).Writes(sample{})) - ws.Route(ws.DELETE("/a").To(dummy).Writes(sample{})) - ws.ApiVersion("1.2.3") - cfg := Config{ - WebServicesUrl: "http://here.com", - ApiPath: "/apipath", - WebServices: []*restful.WebService{ws}, - PostBuildHandler: func(in *ApiDeclarationList) {}, - } - sws := newSwaggerService(cfg) - decl := sws.composeDeclaration(ws, "/tests") - // checks - if decl.ApiVersion != "1.2.3" { - t.Errorf("got %v want %v", decl.ApiVersion, "1.2.3") - } - if decl.BasePath != "http://here.com" { - t.Errorf("got %v want %v", decl.BasePath, "http://here.com") - } - if len(decl.Apis) != 4 { - t.Errorf("got %v want %v", len(decl.Apis), 4) - } - pathOrder := "" - for _, each := range decl.Apis { - pathOrder += each.Path - for _, other := range each.Operations { - pathOrder += other.Method - } - } - - if pathOrder != "/tests/aGETDELETE/tests/bPUTPOST/tests/cPOSTPUT/tests/dDELETEGET" { - t.Errorf("got %v want %v", pathOrder, "see test source") - } -} - -func dummy(i *restful.Request, o *restful.Response) {} - -// go test -v -test.run TestIssue78 ...swagger -type Response struct { - Code int - Users *[]User - Items 
*[]TestItem
-}
-type User struct {
-	Id, Name string
-}
-type TestItem struct {
-	Id, Name string
-}
-
-// clear && go test -v -test.run TestComposeResponseMessages ...swagger
-func TestComposeResponseMessages(t *testing.T) {
-	responseErrors := map[int]restful.ResponseError{}
-	responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}}
-	route := restful.Route{ResponseErrors: responseErrors}
-	decl := new(ApiDeclaration)
-	decl.Models = ModelList{}
-	msgs := composeResponseMessages(route, decl, &Config{})
-	if msgs[0].ResponseModel != "swagger.TestItem" {
-		t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
-	}
-}
-
-func TestIssue78(t *testing.T) {
-	sws := newSwaggerService(Config{})
-	models := new(ModelList)
-	sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models)
-	model, ok := models.At("swagger.Response")
-	if !ok {
-		t.Fatal("missing response model")
-	}
-	if "swagger.Response" != model.Id {
-		t.Fatal("wrong model id:" + model.Id)
-	}
-	code, ok := model.Properties.At("Code")
-	if !ok {
-		t.Fatal("missing code")
-	}
-	if "integer" != *code.Type {
-		t.Fatal("wrong code type:" + *code.Type)
-	}
-	items, ok := model.Properties.At("Items")
-	if !ok {
-		t.Fatal("missing items")
-	}
-	if "array" != *items.Type {
-		t.Fatal("wrong items type:" + *items.Type)
-	}
-	items_items := items.Items
-	if items_items == nil {
-		t.Fatal("missing items->items")
-	}
-	ref := items_items.Ref
-	if ref == nil {
-		t.Fatal("missing $ref")
-	}
-	if *ref != "swagger.TestItem" {
-		t.Fatal("wrong $ref:" + *ref)
-	}
-}
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
deleted file mode 100644
index d90623120a..0000000000
--- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
+++ /dev/null
@@ -1,443 +0,0 @@
-package swagger
-
-import (
-	"fmt"
-
-	"github.com/emicklei/go-restful"
-	// "github.com/emicklei/hopwatch"
-	"net/http"
-	"reflect"
-	"sort"
-	"strings"
-
-	"github.com/emicklei/go-restful/log"
-)
-
-type SwaggerService struct {
-	config            Config
-	apiDeclarationMap *ApiDeclarationList
-}
-
-func newSwaggerService(config Config) *SwaggerService {
-	sws := &SwaggerService{
-		config:            config,
-		apiDeclarationMap: new(ApiDeclarationList)}
-
-	// Build all ApiDeclarations
-	for _, each := range config.WebServices {
-		rootPath := each.RootPath()
-		// skip the api service itself
-		if rootPath != config.ApiPath {
-			if rootPath == "" || rootPath == "/" {
-				// use routes
-				for _, route := range each.Routes() {
-					entry := staticPathFromRoute(route)
-					_, exists := sws.apiDeclarationMap.At(entry)
-					if !exists {
-						sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
-					}
-				}
-			} else { // use root path
-				sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
-			}
-		}
-	}
-
-	// if specified then call the PostBuildHandler
-	if config.PostBuildHandler != nil {
-		config.PostBuildHandler(sws.apiDeclarationMap)
-	}
-	return sws
-}
-
-// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
-var LogInfo = func(format string, v ...interface{}) {
-	// use the restful package-wide logger
-	log.Printf(format, v...)
-}
-
-// InstallSwaggerService adds the WebService that provides the API documentation of all services
-// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
-func InstallSwaggerService(aSwaggerConfig Config) {
-	RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
-}
-
-// RegisterSwaggerService adds the WebService that provides the API documentation of all services
-// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
-func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
-	sws := newSwaggerService(config)
-	ws := new(restful.WebService)
-	ws.Path(config.ApiPath)
-	ws.Produces(restful.MIME_JSON)
-	if config.DisableCORS {
-		ws.Filter(enableCORS)
-	}
-	ws.Route(ws.GET("/").To(sws.getListing))
-	ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
-	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
-	LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
-	wsContainer.Add(ws)
-
-	// Check paths for UI serving
-	if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
-		swaggerPathSlash := config.SwaggerPath
-		// path must end with slash /
-		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
-			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
-			swaggerPathSlash += "/"
-		}
-
-		LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
-		wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
-
-		// if we define a custom static handler, use it
-	} else if config.StaticHandler != nil && config.SwaggerPath != "" {
-		swaggerPathSlash := config.SwaggerPath
-		// path must end with slash /
-		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
-			LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
-			swaggerPathSlash += "/"
-
-		}
-		LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
-		wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
-
-	} else {
-		LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
-	}
-}
-
-func staticPathFromRoute(r restful.Route) string {
-	static := r.Path
-	bracket := strings.Index(static, "{")
-	if bracket <= 1 { // result cannot be empty
-		return static
-	}
-	if bracket != -1 {
-		static = r.Path[:bracket]
-	}
-	if strings.HasSuffix(static, "/") {
-		return static[:len(static)-1]
-	} else {
-		return static
-	}
-}
-
-func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
-		// prevent duplicate header
-		if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
-			resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
-		}
-	}
-	chain.ProcessFilter(req, resp)
-}
-
-func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
-	listing := sws.produceListing()
-	resp.WriteAsJson(listing)
-}
-
-func (sws SwaggerService) produceListing() ResourceListing {
-	listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion:
sws.config.ApiVersion, Info: sws.config.Info} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - ref := Resource{Path: k} - if len(v.Apis) > 0 { // use description of first (could still be empty) - ref.Description = v.Apis[0].Description - } - listing.Apis = append(listing.Apis, ref) - }) - return listing -} - -func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) { - decl, ok := sws.produceDeclarations(composeRootPath(req)) - if !ok { - resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") - return - } - // unless WebServicesUrl is given - if len(sws.config.WebServicesUrl) == 0 { - // update base path from the actual request - // TODO how to detect https? assume http for now - var host string - // X-Forwarded-Host or Host or Request.Host - hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific? - if !ok || len(hostvalues) == 0 { - forwarded, ok := req.Request.Header["Host"] // without reverse-proxy - if !ok || len(forwarded) == 0 { - // fallback to Host field - host = req.Request.Host - } else { - host = forwarded[0] - } - } else { - host = hostvalues[0] - } - // inspect Referer for the scheme (http vs https) - scheme := "http" - if referer := req.Request.Header["Referer"]; len(referer) > 0 { - if strings.HasPrefix(referer[0], "https") { - scheme = "https" - } - } - decl.BasePath = fmt.Sprintf("%s://%s", scheme, host) - } - resp.WriteAsJson(decl) -} - -func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration { - decls := map[string]ApiDeclaration{} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - decls[k] = v - }) - return decls -} - -func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) { - decl, ok := sws.apiDeclarationMap.At(route) - if !ok { - return nil, false - } - decl.BasePath = sws.config.WebServicesUrl - return &decl, true -} - -// composeDeclaration uses all routes and parameters to create a ApiDeclaration -func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration { - decl := ApiDeclaration{ - SwaggerVersion: swaggerVersion, - BasePath: sws.config.WebServicesUrl, - ResourcePath: pathPrefix, - Models: ModelList{}, - ApiVersion: ws.Version()} - - // collect any path parameters - rootParams := []Parameter{} - for _, param := range ws.PathParameters() { - rootParams = append(rootParams, asSwaggerParameter(param.Data())) - } - // aggregate by path - pathToRoutes := newOrderedRouteMap() - for _, other := range ws.Routes() { - if strings.HasPrefix(other.Path, pathPrefix) { - if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' { - continue - } - pathToRoutes.Add(other.Path, other) - } - } - pathToRoutes.Do(func(path string, routes []restful.Route) { - api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()} - voidString := "void" - for _, route := range routes { - operation := Operation{ - Method: route.Method, - Summary: route.Doc, - Notes: route.Notes, - // Type gets overwritten if there is a write sample - DataTypeFields: DataTypeFields{Type: &voidString}, - Parameters: []Parameter{}, - Nickname: route.Operation, - ResponseMessages: composeResponseMessages(route, &decl, &sws.config)} - - operation.Consumes = route.Consumes - operation.Produces = route.Produces - - // share root params if any - for _, swparam := range rootParams { - operation.Parameters = append(operation.Parameters, swparam) - } - // route 
specific params - for _, param := range route.ParameterDocs { - operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) - } - - sws.addModelsFromRouteTo(&operation, route, &decl) - api.Operations = append(api.Operations, operation) - } - decl.Apis = append(decl.Apis, api) - }) - return decl -} - -func withoutWildcard(path string) string { - if strings.HasSuffix(path, ":*}") { - return path[0:len(path)-3] + "}" - } - return path -} - -// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them. -func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) { - if route.ResponseErrors == nil { - return messages - } - // sort by code - codes := sort.IntSlice{} - for code := range route.ResponseErrors { - codes = append(codes, code) - } - codes.Sort() - for _, code := range codes { - each := route.ResponseErrors[code] - message := ResponseMessage{ - Code: code, - Message: each.Message, - } - if each.Model != nil { - st := reflect.TypeOf(each.Model) - isCollection, st := detectCollectionType(st) - // collection cannot be in responsemodel - if !isCollection { - modelName := modelBuilder{}.keyFrom(st) - modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "") - message.ResponseModel = modelName - } - } - messages = append(messages, message) - } - return -} - -// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. -func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { - if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) - } - if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) - } -} - -func detectCollectionType(st reflect.Type) (bool, reflect.Type) { - isCollection := false - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - st = st.Elem() - isCollection = true - } else { - if st.Kind() == reflect.Ptr { - if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array { - st = st.Elem().Elem() - isCollection = true - } - } - } - return isCollection, st -} - -// addModelFromSample creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { - mb := modelBuilder{Models: models, Config: &sws.config} - if isResponse { - sampleType, items := asDataType(sample, &sws.config) - operation.Type = sampleType - operation.Items = items - } - mb.addModelFrom(sample) -} - -func asSwaggerParameter(param restful.ParameterData) Parameter { - return Parameter{ - DataTypeFields: DataTypeFields{ - Type: ¶m.DataType, - Format: asFormat(param.DataType, param.DataFormat), - DefaultValue: Special(param.DefaultValue), - }, - Name: param.Name, - Description: param.Description, - ParamType: asParamType(param.Kind), - - Required: param.Required} -} - -// Between 1..7 path parameters is supported -func composeRootPath(req *restful.Request) string { - path := "/" + req.PathParameter("a") - b := req.PathParameter("b") - if b == "" { - return path - } - path = path + "/" + b - c := req.PathParameter("c") - if c == "" { - return path - } - path = path + "/" + c - d := req.PathParameter("d") - if d == "" { - return path - } - path = path + "/" + d - e := req.PathParameter("e") - if e == "" { - return path - } - path = path + "/" 
+ e - f := req.PathParameter("f") - if f == "" { - return path - } - path = path + "/" + f - g := req.PathParameter("g") - if g == "" { - return path - } - return path + "/" + g -} - -func asFormat(dataType string, dataFormat string) string { - if dataFormat != "" { - return dataFormat - } - return "" // TODO -} - -func asParamType(kind int) string { - switch { - case kind == restful.PathParameterKind: - return "path" - case kind == restful.QueryParameterKind: - return "query" - case kind == restful.BodyParameterKind: - return "body" - case kind == restful.HeaderParameterKind: - return "header" - case kind == restful.FormParameterKind: - return "form" - } - return "" -} - -func asDataType(any interface{}, config *Config) (*string, *Item) { - // If it's not a collection, return the suggested model name - st := reflect.TypeOf(any) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - // if it's not a collection we are done - if !isCollection { - return &modelName, nil - } - - // XXX: This is not very elegant - // We create an Item object referring to the given model - models := ModelList{} - mb := modelBuilder{Models: &models, Config: config} - mb.addModelFrom(any) - - elemTypeName := mb.getElementTypeName(modelName, "", st) - item := new(Item) - if mb.isPrimitiveType(elemTypeName) { - mapped := mb.jsonSchemaType(elemTypeName) - item.Type = &mapped - } else { - item.Ref = &elemTypeName - } - tmp := "array" - return &tmp, item -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/utils_test.go b/vendor/github.com/emicklei/go-restful-swagger12/utils_test.go deleted file mode 100644 index 220289af39..0000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/utils_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" - "testing" -) - -func testJsonFromStructWithConfig(t *testing.T, sample interface{}, expectedJson string, config *Config) bool { - m := modelsFromStructWithConfig(sample, config) - data, _ := json.MarshalIndent(m, " ", " ") - return compareJson(t, string(data), expectedJson) -} - -func modelsFromStructWithConfig(sample interface{}, config *Config) *ModelList { - models := new(ModelList) - builder := modelBuilder{Models: models, Config: config} - builder.addModelFrom(sample) - return models -} - -func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) bool { - return testJsonFromStructWithConfig(t, sample, expectedJson, &Config{}) -} - -func modelsFromStruct(sample interface{}) *ModelList { - return modelsFromStructWithConfig(sample, &Config{}) -} - -func compareJson(t *testing.T, actualJsonAsString string, expectedJsonAsString string) bool { - success := false - var actualMap map[string]interface{} - json.Unmarshal([]byte(actualJsonAsString), &actualMap) - var expectedMap map[string]interface{} - err := json.Unmarshal([]byte(expectedJsonAsString), &expectedMap) - if err != nil { - var actualArray []interface{} - json.Unmarshal([]byte(actualJsonAsString), &actualArray) - var expectedArray []interface{} - err := json.Unmarshal([]byte(expectedJsonAsString), &expectedArray) - success = reflect.DeepEqual(actualArray, expectedArray) - if err != nil { - t.Fatalf("Unparsable expected JSON: %s, actual: %v, expected: %v", err, actualJsonAsString, expectedJsonAsString) - } - } else { - success = reflect.DeepEqual(actualMap, expectedMap) - } - if !success { - t.Log("---- expected -----") - 
t.Log(withLineNumbers(expectedJsonAsString))
-		t.Log("---- actual -----")
-		t.Log(withLineNumbers(actualJsonAsString))
-		t.Log("---- raw -----")
-		t.Log(actualJsonAsString)
-		t.Error("there are differences")
-		return false
-	}
-	return true
-}
-
-func indexOfNonMatchingLine(actual, expected string) int {
-	a := strings.Split(actual, "\n")
-	e := strings.Split(expected, "\n")
-	size := len(a)
-	if len(e) < len(a) {
-		size = len(e)
-	}
-	for i := 0; i < size; i++ {
-		if a[i] != e[i] {
-			return i
-		}
-	}
-	return -1
-}
-
-func withLineNumbers(content string) string {
-	var buffer bytes.Buffer
-	lines := strings.Split(content, "\n")
-	for i, each := range lines {
-		buffer.WriteString(fmt.Sprintf("%d:%s\n", i, each))
-	}
-	return buffer.String()
-}
diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md
index d90aaa22e4..da5bcaacc6 100644
--- a/vendor/github.com/emicklei/go-restful/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -1,9 +1,44 @@
-Change history of go-restful
-=
+## Change history of go-restful
+
+
+v2.9.2
+
+- Reduce allocations in per-request methods to improve performance (#395)
+
+v2.9.1
+
+- Fix issue with default responses and invalid status code 0. (#393)
+
+v2.9.0
+
+- add per Route content encoding setting (overrides container setting)
+
+v2.8.0
+
+- add Request.QueryParameters()
+- add json-iterator (via build tag)
+- disable vgo module (until log is moved)
+
+v2.7.1
+
+- add vgo module
+
+v2.6.1
+
+- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
+
+v2.6.0
+
+- Make JSR 311 routing and path param processing consistent
+- Adding description to RouteBuilder.Reads()
+- Update example for Swagger12 and OpenAPI
+
 2017-09-13
+
 - added route condition functions using `.If(func)` in route building.
 
 2017-02-16
+
 - solved issue #304, make operation names unique
 
 2017-01-30
diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md
index 002a08d965..f52c25acf6 100644
--- a/vendor/github.com/emicklei/go-restful/README.md
+++ b/vendor/github.com/emicklei/go-restful/README.md
@@ -61,8 +61,21 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
 - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
 - Configurable (trace) logging
 - Customizable gzip/deflate readers and writers using CompressorProvider registration
-
-### Resources
+
+## How to customize
+There are several hooks to customize the behavior of the go-restful package.
+
+- Router algorithm
+- Panic recovery
+- JSON decoder
+- Trace logging
+- Compression
+- Encoders for other serializers
+- Use [jsoniter](https://github.com/json-iterator/go) by building this package with a build tag, e.g. `go build -tags=jsoniter .`
+
+TODO: write examples of these.
+
+## Resources
 
 - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
 - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
@@ -72,4 +85,4 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
 Type ```git shortlog -s``` for a full list of contributors.
 
-© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
+© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
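The README hunk above adds a "How to customize" list that ends in a TODO. As a rough sketch of three of those hooks (router algorithm, panic recovery, trace logging) on a fresh container — the calls below come from the published go-restful v2 API, not from anything introduced by this patch:

```go
package main

import (
	"log"
	"net/http"
	"os"

	restful "github.com/emicklei/go-restful"
)

func main() {
	c := restful.NewContainer()

	// Router algorithm: swap the default JSR 311 router for the curly-brace router.
	c.Router(restful.CurlyRouter{})

	// Panic recovery: replace the default stack-trace dump with a terse 500.
	c.RecoverHandler(func(reason interface{}, w http.ResponseWriter) {
		http.Error(w, "internal error", http.StatusInternalServerError)
	})

	// Trace logging: send route-matching and filter details to a custom logger.
	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags))

	log.Fatal(http.ListenAndServe(":8080", c))
}
```

The jsoniter entry in the same list is a build-time switch (`go build -tags=jsoniter .`), so it needs no code change.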
diff --git a/vendor/github.com/emicklei/go-restful/bench_curly_test.go b/vendor/github.com/emicklei/go-restful/bench_curly_test.go deleted file mode 100644 index db6a1a7524..0000000000 --- a/vendor/github.com/emicklei/go-restful/bench_curly_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package restful - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" -) - -func setupCurly(container *Container) []string { - wsCount := 26 - rtCount := 26 - urisCurly := []string{} - - container.Router(CurlyRouter{}) - for i := 0; i < wsCount; i++ { - root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97)) - ws := new(WebService).Path(root) - for j := 0; j < rtCount; j++ { - sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97)) - ws.Route(ws.GET(sub).Consumes("application/xml").Produces("application/xml").To(echoCurly)) - } - container.Add(ws) - for _, each := range ws.Routes() { - urisCurly = append(urisCurly, "http://bench.com"+each.Path) - } - } - return urisCurly -} - -func echoCurly(req *Request, resp *Response) {} - -func BenchmarkManyCurly(b *testing.B) { - container := NewContainer() - urisCurly := setupCurly(container) - b.ResetTimer() - for t := 0; t < b.N; t++ { - for r := 0; r < 1000; r++ { - for _, each := range urisCurly { - sendNoReturnTo(each, container, t) - } - } - } -} - -func sendNoReturnTo(address string, container *Container, t int) { - httpRequest, _ := http.NewRequest("GET", address, nil) - httpRequest.Header.Set("Accept", "application/xml") - httpWriter := httptest.NewRecorder() - container.dispatch(httpWriter, httpRequest) -} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.go b/vendor/github.com/emicklei/go-restful/bench_test.go deleted file mode 100644 index 3e77c2d292..0000000000 --- a/vendor/github.com/emicklei/go-restful/bench_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package restful - -import ( - "fmt" - "io" - "testing" -) - -var uris = []string{} - -func setup(container *Container) { - wsCount := 26 - rtCount := 26 - - for i := 0; i < wsCount; i++ { - root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97)) - ws := new(WebService).Path(root) - for j := 0; j < rtCount; j++ { - sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97)) - ws.Route(ws.GET(sub).To(echo)) - } - container.Add(ws) - for _, each := range ws.Routes() { - uris = append(uris, "http://bench.com"+each.Path) - } - } -} - -func echo(req *Request, resp *Response) { - io.WriteString(resp.ResponseWriter, "echo") -} - -func BenchmarkMany(b *testing.B) { - container := NewContainer() - setup(container) - b.ResetTimer() - for t := 0; t < b.N; t++ { - for _, each := range uris { - // println(each) - sendItTo(each, container) - } - } -} diff --git a/vendor/github.com/emicklei/go-restful/compress_test.go b/vendor/github.com/emicklei/go-restful/compress_test.go deleted file mode 100644 index cc3e93d543..0000000000 --- a/vendor/github.com/emicklei/go-restful/compress_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package restful - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" -) - -// go test -v -test.run TestGzip ...restful -func TestGzip(t *testing.T) { - EnableContentEncoding = true - httpRequest, _ := http.NewRequest("GET", "/test", nil) - httpRequest.Header.Set("Accept-Encoding", "gzip,deflate") - httpWriter := httptest.NewRecorder() - wanted, encoding := wantsCompressedResponse(httpRequest) - if !wanted { - t.Fatal("should accept 
gzip") - } - if encoding != "gzip" { - t.Fatal("expected gzip") - } - c, err := NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - t.Fatal(err.Error()) - } - c.Write([]byte("Hello World")) - c.Close() - if httpWriter.Header().Get("Content-Encoding") != "gzip" { - t.Fatal("Missing gzip header") - } - reader, err := gzip.NewReader(httpWriter.Body) - if err != nil { - t.Fatal(err.Error()) - } - data, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err.Error()) - } - if got, want := string(data), "Hello World"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestDeflate(t *testing.T) { - EnableContentEncoding = true - httpRequest, _ := http.NewRequest("GET", "/test", nil) - httpRequest.Header.Set("Accept-Encoding", "deflate,gzip") - httpWriter := httptest.NewRecorder() - wanted, encoding := wantsCompressedResponse(httpRequest) - if !wanted { - t.Fatal("should accept deflate") - } - if encoding != "deflate" { - t.Fatal("expected deflate") - } - c, err := NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - t.Fatal(err.Error()) - } - c.Write([]byte("Hello World")) - c.Close() - if httpWriter.Header().Get("Content-Encoding") != "deflate" { - t.Fatal("Missing deflate header") - } - reader, err := zlib.NewReader(httpWriter.Body) - if err != nil { - t.Fatal(err.Error()) - } - data, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err.Error()) - } - if got, want := string(data), "Hello World"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestGzipDecompressRequestBody(t *testing.T) { - b := new(bytes.Buffer) - w := newGzipWriter() - w.Reset(b) - io.WriteString(w, `{"msg":"hi"}`) - w.Flush() - w.Close() - - req := new(Request) - httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes())) - httpRequest.Header.Set("Content-Type", "application/json") - httpRequest.Header.Set("Content-Encoding", "gzip") - req.Request = httpRequest - - doc := make(map[string]interface{}) - req.ReadEntity(&doc) - - if got, want := doc["msg"], "hi"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -func TestZlibDecompressRequestBody(t *testing.T) { - b := new(bytes.Buffer) - w := newZlibWriter() - w.Reset(b) - io.WriteString(w, `{"msg":"hi"}`) - w.Flush() - w.Close() - - req := new(Request) - httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes())) - httpRequest.Header.Set("Content-Type", "application/json") - httpRequest.Header.Set("Content-Encoding", "deflate") - req.Request = httpRequest - - doc := make(map[string]interface{}) - req.ReadEntity(&doc) - - if got, want := doc["msg"], "hi"; got != want { - t.Errorf("got %v want %v", got, want) - } -} diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go index 4196180e54..061a8d7189 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -97,7 +97,7 @@ func (c *Container) Add(service *WebService) *Container { // cannot have duplicate root paths for _, each := range c.webServices { if each.RootPath() == service.RootPath() { - log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) + log.Printf("WebService with duplicate root path detected:['%v']", each) os.Exit(1) } } @@ -139,7 +139,7 @@ func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) boo func (c *Container) Remove(ws *WebService) error { if c.ServeMux == 
http.DefaultServeMux { - errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) log.Print(errMsg) return errors.New(errMsg) } @@ -168,7 +168,7 @@ func (c *Container) Remove(ws *WebService) error { // This may be a security issue as it exposes sourcecode information. func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) for i := 2; ; i += 1 { _, file, line, ok := runtime.Caller(i) if !ok { @@ -220,31 +220,37 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R }() } + // Find best match Route ; err is non nil if no match was found + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() + // Detect if compression is needed // assume without compression, test for override - if c.contentEncodingEnabled { + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { doCompress, encoding := wantsCompressedResponse(httpRequest) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) if err != nil { - log.Print("[restful] unable to install compressor: ", err) + log.Print("unable to install compressor: ", err) httpWriter.WriteHeader(http.StatusInternalServerError) return } } } - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() + if err != nil { // a non-200 response has already been written // run container filters anyway ; they should not touch the response... 
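The dispatch() change above implements the v2.9.0 entry "per Route content encoding setting": route selection now runs before the compression decision, and a per-route *bool overrides the container-wide setting only when it is non-nil. A minimal, self-contained sketch of that nil-means-inherit pattern, with illustrative type and field names:

package main

import "fmt"

type container struct{ contentEncodingEnabled bool }

type route struct {
	// nil means "not set on this route"; fall back to the container setting.
	contentEncodingEnabled *bool
}

func effectiveEncoding(c container, r route) bool {
	enabled := c.contentEncodingEnabled
	if r.contentEncodingEnabled != nil {
		enabled = *r.contentEncodingEnabled
	}
	return enabled
}

func main() {
	on := true
	fmt.Println(effectiveEncoding(container{false}, route{}))                            // false: inherited
	fmt.Println(effectiveEncoding(container{false}, route{contentEncodingEnabled: &on})) // true: overridden
}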
@@ -259,7 +265,12 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) return } - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest) + pathProcessor, routerProcessesPath := c.router.(PathProcessor) + if !routerProcessesPath { + pathProcessor = defaultPathProcessor{} + } + pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) + wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) // pass through filters (if any) if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { // compose filter chain diff --git a/vendor/github.com/emicklei/go-restful/container_test.go b/vendor/github.com/emicklei/go-restful/container_test.go deleted file mode 100644 index 491c793ab3..0000000000 --- a/vendor/github.com/emicklei/go-restful/container_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package restful - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -// go test -v -test.run TestContainer_computeAllowedMethods ...restful -func TestContainer_computeAllowedMethods(t *testing.T) { - wc := NewContainer() - ws1 := new(WebService).Path("/users") - ws1.Route(ws1.GET("{i}").To(dummy)) - ws1.Route(ws1.POST("{i}").To(dummy)) - wc.Add(ws1) - httpRequest, _ := http.NewRequest("GET", "http://api.his.com/users/1", nil) - rreq := Request{Request: httpRequest} - m := wc.computeAllowedMethods(&rreq) - if len(m) != 2 { - t.Errorf("got %d expected 2 methods, %v", len(m), m) - } -} - -func TestContainer_HandleWithFilter(t *testing.T) { - prefilterCalled := false - postfilterCalled := false - httpHandlerCalled := false - - wc := NewContainer() - wc.Filter(func(request *Request, response *Response, chain *FilterChain) { - prefilterCalled = true - chain.ProcessFilter(request, response) - }) - wc.HandleWithFilter("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - httpHandlerCalled = true - w.Write([]byte("ok")) - })) - wc.Filter(func(request *Request, response *Response, chain *FilterChain) { - postfilterCalled = true - chain.ProcessFilter(request, response) - }) - - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/", nil) - wc.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("unexpected code %d", recorder.Code) - } - if recorder.Body.String() != "ok" { - t.Errorf("unexpected body %s", recorder.Body.String()) - } - if !prefilterCalled { - t.Errorf("filter added before calling HandleWithFilter wasn't called") - } - if !postfilterCalled { - t.Errorf("filter added after calling HandleWithFilter wasn't called") - } - if !httpHandlerCalled { - t.Errorf("handler added by calling HandleWithFilter wasn't called") - } -} - -func TestContainerAddAndRemove(t *testing.T) { - ws1 := new(WebService).Path("/") - ws2 := new(WebService).Path("/users") - wc := NewContainer() - wc.Add(ws1) - wc.Add(ws2) - wc.Remove(ws2) - if len(wc.webServices) != 1 { - t.Errorf("expected one webservices") - } - if !wc.isRegisteredOnRoot { - t.Errorf("expected on root registered") - } - wc.Remove(ws1) - if len(wc.webServices) > 0 { - t.Errorf("expected zero webservices") - } - if wc.isRegisteredOnRoot { - t.Errorf("expected not on root registered") - } -} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter_test.go b/vendor/github.com/emicklei/go-restful/cors_filter_test.go deleted file mode 100644 index 
09c5d3300e..0000000000 --- a/vendor/github.com/emicklei/go-restful/cors_filter_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package restful - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -// go test -v -test.run TestCORSFilter_Preflight ...restful -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -func TestCORSFilter_Preflight(t *testing.T) { - tearDown() - ws := new(WebService) - ws.Route(ws.PUT("/cors").To(dummy)) - Add(ws) - - cors := CrossOriginResourceSharing{ - ExposeHeaders: []string{"X-Custom-Header"}, - AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"}, - CookiesAllowed: true, - Container: DefaultContainer} - Filter(cors.Filter) - - // Preflight - httpRequest, _ := http.NewRequest("OPTIONS", "http://api.alice.com/cors", nil) - httpRequest.Method = "OPTIONS" - httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com") - httpRequest.Header.Set(HEADER_AccessControlRequestMethod, "PUT") - httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header") - - httpWriter := httptest.NewRecorder() - DefaultContainer.Dispatch(httpWriter, httpRequest) - - actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) - if "http://api.bob.com" != actual { - t.Fatal("expected: http://api.bob.com but got:" + actual) - } - actual = httpWriter.Header().Get(HEADER_AccessControlAllowMethods) - if "PUT" != actual { - t.Fatal("expected: PUT but got:" + actual) - } - actual = httpWriter.Header().Get(HEADER_AccessControlAllowHeaders) - if "X-Custom-Header, X-Additional-Header" != actual { - t.Fatal("expected: X-Custom-Header, X-Additional-Header but got:" + actual) - } - - if !cors.isOriginAllowed("somewhere") { - t.Fatal("origin expected to be allowed") - } - cors.AllowedDomains = []string{"overthere.com"} - if cors.isOriginAllowed("somewhere") { - t.Fatal("origin [somewhere] expected NOT to be allowed") - } - if !cors.isOriginAllowed("overthere.com") { - t.Fatal("origin [overthere] expected to be allowed") - } - -} - -// go test -v -test.run TestCORSFilter_Actual ...restful -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -func TestCORSFilter_Actual(t *testing.T) { - tearDown() - ws := new(WebService) - ws.Route(ws.PUT("/cors").To(dummy)) - Add(ws) - - cors := CrossOriginResourceSharing{ - ExposeHeaders: []string{"X-Custom-Header"}, - AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"}, - CookiesAllowed: true, - Container: DefaultContainer} - Filter(cors.Filter) - - // Actual - httpRequest, _ := http.NewRequest("PUT", "http://api.alice.com/cors", nil) - httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com") - httpRequest.Header.Set("X-Custom-Header", "value") - - httpWriter := httptest.NewRecorder() - DefaultContainer.Dispatch(httpWriter, httpRequest) - actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) - if "http://api.bob.com" != actual { - t.Fatal("expected: http://api.bob.com but got:" + actual) - } - if httpWriter.Body.String() != "dummy" { - t.Fatal("expected: dummy but got:" + httpWriter.Body.String()) - } -} - -var allowedDomainInput = []struct { - domains []string - origin string - allowed bool -}{ - {[]string{}, "http://anything.com", true}, - {[]string{"example.com"}, "example.com", true}, - {[]string{"example.com"}, "not-allowed", false}, - {[]string{"not-matching.com", "example.com"}, "example.com", true}, - {[]string{".*"}, "example.com", true}, -} - -// go test -v -test.run TestCORSFilter_AllowedDomains 
...restful -func TestCORSFilter_AllowedDomains(t *testing.T) { - for _, each := range allowedDomainInput { - tearDown() - ws := new(WebService) - ws.Route(ws.PUT("/cors").To(dummy)) - Add(ws) - - cors := CrossOriginResourceSharing{ - AllowedDomains: each.domains, - CookiesAllowed: true, - Container: DefaultContainer} - Filter(cors.Filter) - - httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil) - httpRequest.Header.Set(HEADER_Origin, each.origin) - httpWriter := httptest.NewRecorder() - DefaultContainer.Dispatch(httpWriter, httpRequest) - actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) - if actual != each.origin && each.allowed { - t.Fatal("expected to be accepted") - } - if actual == each.origin && !each.allowed { - t.Fatal("did not expect to be accepted") - } - } -} diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go index 79f1f5aa20..14d5b76bf0 100644 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -45,14 +45,14 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := sortableCurlyRoutes{} + candidates := make(sortableCurlyRoutes, 0, 8) for _, each := range ws.routes { matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) if matches { candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } - sort.Sort(sort.Reverse(candidates)) + sort.Sort(candidates) return candidates } diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go index 296f94650e..403dd3be94 100644 --- a/vendor/github.com/emicklei/go-restful/curly_route.go +++ b/vendor/github.com/emicklei/go-restful/curly_route.go @@ -11,6 +11,7 @@ type curlyRoute struct { staticCount int } +// sortableCurlyRoutes orders by most parameters and path elements first. 
type sortableCurlyRoutes []curlyRoute func (s *sortableCurlyRoutes) add(route curlyRoute) { @@ -18,6 +19,7 @@ func (s *sortableCurlyRoutes) add(route curlyRoute) { } func (s sortableCurlyRoutes) routes() (routes []Route) { + routes = make([]Route, 0, len(s)) for _, each := range s { routes = append(routes, each.route) // TODO change return type } @@ -31,22 +33,22 @@ func (s sortableCurlyRoutes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sortableCurlyRoutes) Less(i, j int) bool { - ci := s[i] - cj := s[j] + a := s[j] + b := s[i] // primary key - if ci.staticCount < cj.staticCount { + if a.staticCount < b.staticCount { return true } - if ci.staticCount > cj.staticCount { + if a.staticCount > b.staticCount { return false } // secondary key - if ci.paramCount < cj.paramCount { + if a.paramCount < b.paramCount { return true } - if ci.paramCount > cj.paramCount { + if a.paramCount > b.paramCount { return false } - return ci.route.Path < cj.route.Path + return a.route.Path < b.route.Path } diff --git a/vendor/github.com/emicklei/go-restful/curly_test.go b/vendor/github.com/emicklei/go-restful/curly_test.go deleted file mode 100644 index bec017ca76..0000000000 --- a/vendor/github.com/emicklei/go-restful/curly_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package restful - -import ( - "io" - "net/http" - "testing" -) - -var requestPaths = []struct { - // url with path (1) is handled by service with root (2) and remainder has value final (3) - path, root string -}{ - {"/", "/"}, - {"/p", "/p"}, - {"/p/x", "/p/{q}"}, - {"/q/x", "/q"}, - {"/p/x/", "/p/{q}"}, - {"/p/x/y", "/p/{q}"}, - {"/q/x/y", "/q"}, - {"/z/q", "/{p}/q"}, - {"/a/b/c/q", "/"}, -} - -// go test -v -test.run TestCurlyDetectWebService ...restful -func TestCurlyDetectWebService(t *testing.T) { - ws1 := new(WebService).Path("/") - ws2 := new(WebService).Path("/p") - ws3 := new(WebService).Path("/q") - ws4 := new(WebService).Path("/p/q") - ws5 := new(WebService).Path("/p/{q}") - ws7 := new(WebService).Path("/{p}/q") - var wss = []*WebService{ws1, ws2, ws3, ws4, ws5, ws7} - - for _, each := range wss { - t.Logf("path=%s,toks=%v\n", each.pathExpr.Source, each.pathExpr.tokens) - } - - router := CurlyRouter{} - - ok := true - for i, fixture := range requestPaths { - requestTokens := tokenizePath(fixture.path) - who := router.detectWebService(requestTokens, wss) - if who != nil && who.RootPath() != fixture.root { - t.Logf("[line:%v] Unexpected dispatcher, expected:%v, actual:%v", i, fixture.root, who.RootPath()) - ok = false - } - } - if !ok { - t.Fail() - } -} - -var serviceDetects = []struct { - path string - found bool - root string -}{ - {"/a/b", true, "/{p}/{q}/{r}"}, - {"/p/q", true, "/p/q"}, - {"/q/p", true, "/q"}, - {"/", true, "/"}, - {"/p/q/r", true, "/p/q"}, -} - -// go test -v -test.run Test_detectWebService ...restful -func Test_detectWebService(t *testing.T) { - router := CurlyRouter{} - ws1 := new(WebService).Path("/") - ws2 := new(WebService).Path("/p") - ws3 := new(WebService).Path("/q") - ws4 := new(WebService).Path("/p/q") - ws5 := new(WebService).Path("/p/{q}") - ws6 := new(WebService).Path("/p/{q}/") - ws7 := new(WebService).Path("/{p}/q") - ws8 := new(WebService).Path("/{p}/{q}/{r}") - var wss = []*WebService{ws8, ws7, ws6, ws5, ws4, ws3, ws2, ws1} - for _, fix := range serviceDetects { - requestPath := fix.path - requestTokens := tokenizePath(requestPath) - for _, ws := range wss { - serviceTokens := ws.pathExpr.tokens - matches, score := router.computeWebserviceScore(requestTokens, 
serviceTokens) - t.Logf("req=%s,toks:%v,ws=%s,toks:%v,score=%d,matches=%v", requestPath, requestTokens, ws.RootPath(), serviceTokens, score, matches) - } - best := router.detectWebService(requestTokens, wss) - if best != nil { - if fix.found { - t.Logf("best=%s", best.RootPath()) - } else { - t.Fatalf("should have found:%s", fix.root) - } - } - } -} - -var routeMatchers = []struct { - route string - path string - matches bool - paramCount int - staticCount int -}{ - // route, request-path - {"/a", "/a", true, 0, 1}, - {"/a", "/b", false, 0, 0}, - {"/a", "/b", false, 0, 0}, - {"/a/{b}/c/", "/a/2/c", true, 1, 2}, - {"/{a}/{b}/{c}/", "/a/b", false, 0, 0}, - {"/{x:*}", "/", false, 0, 0}, - {"/{x:*}", "/a", true, 1, 0}, - {"/{x:*}", "/a/b", true, 1, 0}, - {"/a/{x:*}", "/a/b", true, 1, 1}, - {"/a/{x:[A-Z][A-Z]}", "/a/ZX", true, 1, 1}, - {"/basepath/{resource:*}", "/basepath/some/other/location/test.xml", true, 1, 1}, -} - -// clear && go test -v -test.run Test_matchesRouteByPathTokens ...restful -func Test_matchesRouteByPathTokens(t *testing.T) { - router := CurlyRouter{} - for i, each := range routeMatchers { - routeToks := tokenizePath(each.route) - reqToks := tokenizePath(each.path) - matches, pCount, sCount := router.matchesRouteByPathTokens(routeToks, reqToks) - if matches != each.matches { - t.Fatalf("[%d] unexpected matches outcome route:%s, path:%s, matches:%v", i, each.route, each.path, matches) - } - if pCount != each.paramCount { - t.Fatalf("[%d] unexpected paramCount got:%d want:%d ", i, pCount, each.paramCount) - } - if sCount != each.staticCount { - t.Fatalf("[%d] unexpected staticCount got:%d want:%d ", i, sCount, each.staticCount) - } - } -} - -// clear && go test -v -test.run TestExtractParameters_Wildcard1 ...restful -func TestExtractParameters_Wildcard1(t *testing.T) { - params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remainder", t) - if params["var"] != "remainder" { - t.Errorf("parameter mismatch var: %s", params["var"]) - } -} - -// clear && go test -v -test.run TestExtractParameters_Wildcard2 ...restful -func TestExtractParameters_Wildcard2(t *testing.T) { - params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remain/der", t) - if params["var"] != "remain/der" { - t.Errorf("parameter mismatch var: %s", params["var"]) - } -} - -// clear && go test -v -test.run TestExtractParameters_Wildcard3 ...restful -func TestExtractParameters_Wildcard3(t *testing.T) { - params := doExtractParams("/static/{var:*}", 2, "/static/test/sub/hi.html", t) - if params["var"] != "test/sub/hi.html" { - t.Errorf("parameter mismatch var: %s", params["var"]) - } -} - -// clear && go test -v -test.run TestCurly_ISSUE_34 ...restful -func TestCurly_ISSUE_34(t *testing.T) { - ws1 := new(WebService).Path("/") - ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy)) - ws1.Route(ws1.GET("/network/{id}").To(curlyDummy)) - croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12")) - if len(croutes) != 2 { - t.Fatal("expected 2 routes") - } - if got, want := croutes[0].route.Path, "/network/{id}"; got != want { - t.Errorf("got %v want %v", got, want) - } -} - -// clear && go test -v -test.run TestCurly_ISSUE_34_2 ...restful -func TestCurly_ISSUE_34_2(t *testing.T) { - ws1 := new(WebService) - ws1.Route(ws1.GET("/network/{id}").To(curlyDummy)) - ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy)) - croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12")) - if len(croutes) != 2 { - t.Fatal("expected 2 routes") - } - if got, want := croutes[0].route.Path, "/network/{id}"; got != want 
{ - t.Errorf("got %v want %v", got, want) - } -} - -// clear && go test -v -test.run TestCurly_JsonHtml ...restful -func TestCurly_JsonHtml(t *testing.T) { - ws1 := new(WebService) - ws1.Path("/") - ws1.Route(ws1.GET("/some.html").To(curlyDummy).Consumes("*/*").Produces("text/html")) - req, _ := http.NewRequest("GET", "/some.html", nil) - req.Header.Set("Accept", "application/json") - _, route, err := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req) - if err == nil { - t.Error("error expected") - } - if route != nil { - t.Error("no route expected") - } -} - -// go test -v -test.run TestCurly_ISSUE_137 ...restful -func TestCurly_ISSUE_137(t *testing.T) { - ws1 := new(WebService) - ws1.Route(ws1.GET("/hello").To(curlyDummy)) - ws1.Path("/") - req, _ := http.NewRequest("GET", "/", nil) - _, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req) - t.Log(route) - if route != nil { - t.Error("no route expected") - } -} - -// go test -v -test.run TestCurly_ISSUE_137_2 ...restful -func TestCurly_ISSUE_137_2(t *testing.T) { - ws1 := new(WebService) - ws1.Route(ws1.GET("/hello").To(curlyDummy)) - ws1.Path("/") - req, _ := http.NewRequest("GET", "/hello/bob", nil) - _, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req) - t.Log(route) - if route != nil { - t.Errorf("no route expected, got %v", route) - } -} - -func curlyDummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "curlyDummy") } diff --git a/vendor/github.com/emicklei/go-restful/doc_examples_test.go b/vendor/github.com/emicklei/go-restful/doc_examples_test.go deleted file mode 100644 index 0af636e553..0000000000 --- a/vendor/github.com/emicklei/go-restful/doc_examples_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package restful - -import "net/http" - -func ExampleOPTIONSFilter() { - // Install the OPTIONS filter on the default Container - Filter(OPTIONSFilter()) -} -func ExampleContainer_OPTIONSFilter() { - // Install the OPTIONS filter on a Container - myContainer := new(Container) - myContainer.Filter(myContainer.OPTIONSFilter) -} - -func ExampleContainer() { - // The Default container of go-restful uses the http.DefaultServeMux. - // You can create your own Container using restful.NewContainer() and create a new http.Server for that particular container - - ws := new(WebService) - wsContainer := NewContainer() - wsContainer.Add(ws) - server := &http.Server{Addr: ":8080", Handler: wsContainer} - server.ListenAndServe() -} - -func ExampleCrossOriginResourceSharing() { - // To install this filter on the Default Container use: - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) -} - -func ExampleServiceError() { - resp := new(Response) - resp.WriteEntity(NewError(http.StatusBadRequest, "Non-integer {id} path parameter")) -} - -func ExampleBoundedCachedCompressors() { - // Register a compressor provider (gzip/deflate read/write) that uses - // a bounded cache with a maximum of 20 writers and 20 readers. - SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) -} diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go index 6ecf6c7f89..66dfc824f5 100644 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -5,7 +5,6 @@ package restful // that can be found in the LICENSE file. 
import ( - "encoding/json" "encoding/xml" "strings" "sync" @@ -128,7 +127,7 @@ type entityJSONAccess struct { // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) + decoder := NewDecoder(req.Request.Body) decoder.UseNumber() return decoder.Decode(v) } @@ -147,7 +146,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, " ", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -159,5 +158,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors_test.go b/vendor/github.com/emicklei/go-restful/entity_accessors_test.go deleted file mode 100644 index d1c1e1585f..0000000000 --- a/vendor/github.com/emicklei/go-restful/entity_accessors_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package restful - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -type keyvalue struct { - readCalled bool - writeCalled bool -} - -func (kv *keyvalue) Read(req *Request, v interface{}) error { - //t := reflect.TypeOf(v) - //rv := reflect.ValueOf(v) - kv.readCalled = true - return nil -} - -func (kv *keyvalue) Write(resp *Response, status int, v interface{}) error { - t := reflect.TypeOf(v) - rv := reflect.ValueOf(v) - for ix := 0; ix < t.NumField(); ix++ { - sf := t.Field(ix) - io.WriteString(resp, sf.Name) - io.WriteString(resp, "=") - io.WriteString(resp, fmt.Sprintf("%v\n", rv.Field(ix).Interface())) - } - kv.writeCalled = true - return nil -} - -// go test -v -test.run TestKeyValueEncoding ...restful -func TestKeyValueEncoding(t *testing.T) { - type Book struct { - Title string - Author string - PublishedYear int - } - kv := new(keyvalue) - RegisterEntityAccessor("application/kv", kv) - b := Book{"Singing for Dummies", "john doe", 2015} - - // Write - httpWriter := httptest.NewRecorder() - // Accept Produces - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/kv,*/*;q=0.8", routeProduces: []string{"application/kv"}, prettyPrint: true} - resp.WriteEntity(b) - t.Log(string(httpWriter.Body.Bytes())) - if !kv.writeCalled { - t.Error("Write never called") - } - - // Read - bodyReader := bytes.NewReader(httpWriter.Body.Bytes()) - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/kv; charset=UTF-8") - request := NewRequest(httpRequest) - var bb Book - request.ReadEntity(&bb) - if !kv.readCalled { - t.Error("Read never called") - } -} diff --git a/vendor/github.com/emicklei/go-restful/filter_test.go b/vendor/github.com/emicklei/go-restful/filter_test.go deleted file mode 100644 index fadfb570f6..0000000000 --- a/vendor/github.com/emicklei/go-restful/filter_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package restful - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" -) - -func setupServices(addGlobalFilter bool, addServiceFilter bool, addRouteFilter bool) { - if addGlobalFilter { - Filter(globalFilter) - } - Add(newTestService(addServiceFilter, addRouteFilter)) -} - -func tearDown() { - 
DefaultContainer.webServices = []*WebService{} - DefaultContainer.isRegisteredOnRoot = true // this allows for setupServices multiple times - DefaultContainer.containerFilters = []FilterFunction{} -} - -func newTestService(addServiceFilter bool, addRouteFilter bool) *WebService { - ws := new(WebService).Path("") - if addServiceFilter { - ws.Filter(serviceFilter) - } - rb := ws.GET("/foo").To(foo) - if addRouteFilter { - rb.Filter(routeFilter) - } - ws.Route(rb) - ws.Route(ws.GET("/bar").To(bar)) - return ws -} - -func foo(req *Request, resp *Response) { - io.WriteString(resp.ResponseWriter, "foo") -} - -func bar(req *Request, resp *Response) { - io.WriteString(resp.ResponseWriter, "bar") -} - -func fail(req *Request, resp *Response) { - http.Error(resp.ResponseWriter, "something failed", http.StatusInternalServerError) -} - -func globalFilter(req *Request, resp *Response, chain *FilterChain) { - io.WriteString(resp.ResponseWriter, "global-") - chain.ProcessFilter(req, resp) -} - -func serviceFilter(req *Request, resp *Response, chain *FilterChain) { - io.WriteString(resp.ResponseWriter, "service-") - chain.ProcessFilter(req, resp) -} - -func routeFilter(req *Request, resp *Response, chain *FilterChain) { - io.WriteString(resp.ResponseWriter, "route-") - chain.ProcessFilter(req, resp) -} - -func TestNoFilter(t *testing.T) { - tearDown() - setupServices(false, false, false) - actual := sendIt("http://example.com/foo") - if "foo" != actual { - t.Fatal("expected: foo but got:" + actual) - } -} - -func TestGlobalFilter(t *testing.T) { - tearDown() - setupServices(true, false, false) - actual := sendIt("http://example.com/foo") - if "global-foo" != actual { - t.Fatal("expected: global-foo but got:" + actual) - } -} - -func TestWebServiceFilter(t *testing.T) { - tearDown() - setupServices(true, true, false) - actual := sendIt("http://example.com/foo") - if "global-service-foo" != actual { - t.Fatal("expected: global-service-foo but got:" + actual) - } -} - -func TestRouteFilter(t *testing.T) { - tearDown() - setupServices(true, true, true) - actual := sendIt("http://example.com/foo") - if "global-service-route-foo" != actual { - t.Fatal("expected: global-service-route-foo but got:" + actual) - } -} - -func TestRouteFilterOnly(t *testing.T) { - tearDown() - setupServices(false, false, true) - actual := sendIt("http://example.com/foo") - if "route-foo" != actual { - t.Fatal("expected: route-foo but got:" + actual) - } -} - -func TestBar(t *testing.T) { - tearDown() - setupServices(false, true, false) - actual := sendIt("http://example.com/bar") - if "service-bar" != actual { - t.Fatal("expected: service-bar but got:" + actual) - } -} - -func TestAllFiltersBar(t *testing.T) { - tearDown() - setupServices(true, true, true) - actual := sendIt("http://example.com/bar") - if "global-service-bar" != actual { - t.Fatal("expected: global-service-bar but got:" + actual) - } -} - -func sendIt(address string) string { - httpRequest, _ := http.NewRequest("GET", address, nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - return httpWriter.Body.String() -} - -func sendItTo(address string, container *Container) string { - httpRequest, _ := http.NewRequest("GET", address, nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - container.dispatch(httpWriter, httpRequest) - return httpWriter.Body.String() -} diff --git a/vendor/github.com/emicklei/go-restful/json.go 
b/vendor/github.com/emicklei/go-restful/json.go new file mode 100644 index 0000000000..871165166a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/json.go @@ -0,0 +1,11 @@ +// +build !jsoniter + +package restful + +import "encoding/json" + +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go new file mode 100644 index 0000000000..11b8f8ae7f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsoniter.go @@ -0,0 +1,12 @@ +// +build jsoniter + +package restful + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go index 9e81224164..3ede1891ec 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -39,10 +39,35 @@ func (r RouterJSR311) SelectRoute( return dispatcher, route, ok } +// ExtractParameters is used to obtain the path parameters from the route using the same matching +// engine as the JSR 311 router. +func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { + webServiceExpr := webService.pathExpr + webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) + pathParameters := r.extractParams(webServiceExpr, webServiceMatches) + routeExpr := route.pathExpr + routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) + routeParams := r.extractParams(routeExpr, routeMatches) + for key, value := range routeParams { + pathParameters[key] = value + } + return pathParameters +} + +func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { + params := map[string]string{} + for i := 1; i < len(matches); i++ { + if len(pathExpr.VarNames) >= i { + params[pathExpr.VarNames[i-1]] = matches[i] + } + } + return params +} + // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - ifOk := []Route{} - for _, each := range routes { + candidates := make([]*Route, 0, 8) + for i, each := range routes { ok := true for _, fn := range each.If { if !fn(httpRequest) { @@ -51,10 +76,10 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R } } if ok { - ifOk = append(ifOk, each) + candidates = append(candidates, &routes[i]) } } - if len(ifOk) == 0 { + if len(candidates) == 0 { if trace { traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) } @@ -62,54 +87,58 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R } // http method - methodOk := []Route{} - for _, each := range ifOk { + previous := candidates + candidates = candidates[:0] + for _, each := range previous { if httpRequest.Method == each.Method { - methodOk = append(methodOk, each) + candidates = append(candidates, each) } } - if len(methodOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method) + 
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) } return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") } - inputMediaOk := methodOk // content-type contentType := httpRequest.Header.Get(HEADER_ContentType) - inputMediaOk = []Route{} - for _, each := range methodOk { + previous = candidates + candidates = candidates[:0] + for _, each := range previous { if each.matchesContentType(contentType) { - inputMediaOk = append(inputMediaOk, each) + candidates = append(candidates, each) } } - if len(inputMediaOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType) + traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) + } + if httpRequest.ContentLength > 0 { + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept - outputMediaOk := []Route{} + previous = candidates + candidates = candidates[:0] accept := httpRequest.Header.Get(HEADER_Accept) if len(accept) == 0 { accept = "*/*" } - for _, each := range inputMediaOk { + for _, each := range previous { if each.matchesAccept(accept) { - outputMediaOk = append(outputMediaOk, each) + candidates = append(candidates, each) } } - if len(outputMediaOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept) + traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) } return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return &outputMediaOk[0], nil + return candidates[0], nil } // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 diff --git a/vendor/github.com/emicklei/go-restful/jsr311_test.go b/vendor/github.com/emicklei/go-restful/jsr311_test.go deleted file mode 100644 index ecde60366e..0000000000 --- a/vendor/github.com/emicklei/go-restful/jsr311_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package restful - -import ( - "io" - "net/http" - "sort" - "testing" -) - -// -// Step 1 tests -// -var paths = []struct { - // url with path (1) is handled by service with root (2) and last capturing group has value final (3) - path, root, final string -}{ - {"/", "/", "/"}, - {"/p", "/p", ""}, - {"/p/x", "/p/{q}", ""}, - {"/q/x", "/q", "/x"}, - {"/p/x/", "/p/{q}", "/"}, - {"/p/x/y", "/p/{q}", "/y"}, - {"/q/x/y", "/q", "/x/y"}, - {"/z/q", "/{p}/q", ""}, - {"/a/b/c/q", "/", "/a/b/c/q"}, -} - -func TestDetectDispatcher(t *testing.T) { - ws1 := new(WebService).Path("/") - ws2 := new(WebService).Path("/p") - ws3 := new(WebService).Path("/q") - ws4 := new(WebService).Path("/p/q") - ws5 := new(WebService).Path("/p/{q}") - ws6 := new(WebService).Path("/p/{q}/") - ws7 := new(WebService).Path("/{p}/q") - var dispatchers = []*WebService{ws1, ws2, ws3, ws4, ws5, ws6, ws7} - - wc := NewContainer() - for _, each := range dispatchers { - wc.Add(each) - } - - router := RouterJSR311{} - - ok := true - for i, fixture := range paths { - who, final, err := router.detectDispatcher(fixture.path, dispatchers) - if err != nil { - t.Logf("error in detection:%v", err) - ok = false - } - if who.RootPath() != fixture.root { - 
t.Logf("[line:%v] Unexpected dispatcher, expected:%v, actual:%v", i, fixture.root, who.RootPath()) - ok = false - } - if final != fixture.final { - t.Logf("[line:%v] Unexpected final, expected:%v, actual:%v", i, fixture.final, final) - ok = false - } - } - if !ok { - t.Fail() - } -} - -// -// Step 2 tests -// - -// go test -v -test.run TestISSUE_179 ...restful -func TestISSUE_179(t *testing.T) { - ws1 := new(WebService) - ws1.Route(ws1.GET("/v1/category/{param:*}").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/v1/category/sub/sub") - t.Logf("%v", routes) -} - -// go test -v -test.run TestISSUE_30 ...restful -func TestISSUE_30(t *testing.T) { - ws1 := new(WebService).Path("/users") - ws1.Route(ws1.GET("/{id}").To(dummy)) - ws1.Route(ws1.POST("/login").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/login") - if len(routes) != 2 { - t.Fatal("expected 2 routes") - } - if routes[0].Path != "/users/login" { - t.Error("first is", routes[0].Path) - t.Logf("routes:%v", routes) - } -} - -// go test -v -test.run TestISSUE_34 ...restful -func TestISSUE_34(t *testing.T) { - ws1 := new(WebService).Path("/") - ws1.Route(ws1.GET("/{type}/{id}").To(dummy)) - ws1.Route(ws1.GET("/network/{id}").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/network/12") - if len(routes) != 2 { - t.Fatal("expected 2 routes") - } - if routes[0].Path != "/network/{id}" { - t.Error("first is", routes[0].Path) - t.Logf("routes:%v", routes) - } -} - -// go test -v -test.run TestISSUE_34_2 ...restful -func TestISSUE_34_2(t *testing.T) { - ws1 := new(WebService).Path("/") - // change the registration order - ws1.Route(ws1.GET("/network/{id}").To(dummy)) - ws1.Route(ws1.GET("/{type}/{id}").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/network/12") - if len(routes) != 2 { - t.Fatal("expected 2 routes") - } - if routes[0].Path != "/network/{id}" { - t.Error("first is", routes[0].Path) - } -} - -// go test -v -test.run TestISSUE_137 ...restful -func TestISSUE_137(t *testing.T) { - ws1 := new(WebService) - ws1.Route(ws1.GET("/hello").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/") - t.Log(routes) - if len(routes) > 0 { - t.Error("no route expected") - } -} - -func TestSelectRoutesSlash(t *testing.T) { - ws1 := new(WebService).Path("/") - ws1.Route(ws1.GET("").To(dummy)) - ws1.Route(ws1.GET("/").To(dummy)) - ws1.Route(ws1.GET("/u").To(dummy)) - ws1.Route(ws1.POST("/u").To(dummy)) - ws1.Route(ws1.POST("/u/v").To(dummy)) - ws1.Route(ws1.POST("/u/{w}").To(dummy)) - ws1.Route(ws1.POST("/u/{w}/z").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/u") - checkRoutesContains(routes, "/u", t) - checkRoutesContainsNo(routes, "/u/v", t) - checkRoutesContainsNo(routes, "/", t) - checkRoutesContainsNo(routes, "/u/{w}/z", t) -} -func TestSelectRoutesU(t *testing.T) { - ws1 := new(WebService).Path("/u") - ws1.Route(ws1.GET("").To(dummy)) - ws1.Route(ws1.GET("/").To(dummy)) - ws1.Route(ws1.GET("/v").To(dummy)) - ws1.Route(ws1.POST("/{w}").To(dummy)) - ws1.Route(ws1.POST("/{w}/z").To(dummy)) // so full path = /u/{w}/z - routes := RouterJSR311{}.selectRoutes(ws1, "/v") // test against /u/v - checkRoutesContains(routes, "/u/{w}", t) -} - -func TestSelectRoutesUsers1(t *testing.T) { - ws1 := new(WebService).Path("/users") - ws1.Route(ws1.POST("").To(dummy)) - ws1.Route(ws1.POST("/").To(dummy)) - ws1.Route(ws1.PUT("/{id}").To(dummy)) - routes := RouterJSR311{}.selectRoutes(ws1, "/1") - checkRoutesContains(routes, "/users/{id}", t) -} -func checkRoutesContains(routes []Route, path string, t 
*testing.T) { - if !containsRoutePath(routes, path, t) { - for _, r := range routes { - t.Logf("route %v %v", r.Method, r.Path) - } - t.Fatalf("routes should include [%v]:", path) - } -} -func checkRoutesContainsNo(routes []Route, path string, t *testing.T) { - if containsRoutePath(routes, path, t) { - for _, r := range routes { - t.Logf("route %v %v", r.Method, r.Path) - } - t.Fatalf("routes should not include [%v]:", path) - } -} -func containsRoutePath(routes []Route, path string, t *testing.T) bool { - for _, each := range routes { - if each.Path == path { - return true - } - } - return false -} - -// go test -v -test.run TestSortableRouteCandidates ...restful -func TestSortableRouteCandidates(t *testing.T) { - fixture := &sortableRouteCandidates{} - r1 := routeCandidate{matchesCount: 0, literalCount: 0, nonDefaultCount: 0} - r2 := routeCandidate{matchesCount: 0, literalCount: 0, nonDefaultCount: 1} - r3 := routeCandidate{matchesCount: 0, literalCount: 1, nonDefaultCount: 1} - r4 := routeCandidate{matchesCount: 1, literalCount: 1, nonDefaultCount: 0} - r5 := routeCandidate{matchesCount: 1, literalCount: 0, nonDefaultCount: 0} - fixture.candidates = append(fixture.candidates, r5, r4, r3, r2, r1) - sort.Sort(sort.Reverse(fixture)) - first := fixture.candidates[0] - if first.matchesCount != 1 && first.literalCount != 1 && first.nonDefaultCount != 0 { - t.Fatal("expected r4") - } - last := fixture.candidates[len(fixture.candidates)-1] - if last.matchesCount != 0 && last.literalCount != 0 && last.nonDefaultCount != 0 { - t.Fatal("expected r1") - } -} - -func TestDetectRouteReturns404IfNoRoutePassesConditions(t *testing.T) { - called := false - shouldNotBeCalledButWas := false - - routes := []Route{ - new(RouteBuilder).To(dummy). - If(func(req *http.Request) bool { return false }). - Build(), - - // check that condition functions are called in order - new(RouteBuilder). - To(dummy). - If(func(req *http.Request) bool { return true }). - If(func(req *http.Request) bool { called = true; return false }). - Build(), - - // check that condition functions short circuit - new(RouteBuilder). - To(dummy). - If(func(req *http.Request) bool { return false }). - If(func(req *http.Request) bool { shouldNotBeCalledButWas = true; return false }). 
- Build(), - } - - _, err := RouterJSR311{}.detectRoute(routes, (*http.Request)(nil)) - if se := err.(ServiceError); se.Code != 404 { - t.Fatalf("expected 404, got %d", se.Code) - } - - if !called { - t.Fatal("expected condition function to get called, but it wasn't") - } - - if shouldNotBeCalledButWas { - t.Fatal("expected condition function to not be called, but it was") - } -} - -func dummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "dummy") } diff --git a/vendor/github.com/emicklei/go-restful/mime_test.go b/vendor/github.com/emicklei/go-restful/mime_test.go deleted file mode 100644 index a910bb1005..0000000000 --- a/vendor/github.com/emicklei/go-restful/mime_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package restful - -import ( - "fmt" - "testing" -) - -// go test -v -test.run TestSortMimes ...restful -func TestSortMimes(t *testing.T) { - accept := "text/html; q=0.8, text/plain, image/gif, */*; q=0.01, image/jpeg" - result := sortedMimes(accept) - got := fmt.Sprintf("%v", result) - want := "[{text/plain 1} {image/gif 1} {image/jpeg 1} {text/html 0.8} {*/* 0.01}]" - if got != want { - t.Errorf("bad sort order of mime types:%s", got) - } -} diff --git a/vendor/github.com/emicklei/go-restful/options_filter_test.go b/vendor/github.com/emicklei/go-restful/options_filter_test.go deleted file mode 100644 index f0fceb834e..0000000000 --- a/vendor/github.com/emicklei/go-restful/options_filter_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package restful - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -// go test -v -test.run TestOptionsFilter ...restful -func TestOptionsFilter(t *testing.T) { - tearDown() - ws := new(WebService) - ws.Route(ws.GET("/candy/{kind}").To(dummy)) - ws.Route(ws.DELETE("/candy/{kind}").To(dummy)) - ws.Route(ws.POST("/candies").To(dummy)) - Add(ws) - Filter(OPTIONSFilter()) - - httpRequest, _ := http.NewRequest("OPTIONS", "http://here.io/candy/gum", nil) - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - actual := httpWriter.Header().Get(HEADER_Allow) - if "GET,DELETE" != actual { - t.Fatal("expected: GET,DELETE but got:" + actual) - } - - httpRequest, _ = http.NewRequest("OPTIONS", "http://here.io/candies", nil) - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - actual = httpWriter.Header().Get(HEADER_Allow) - if "POST" != actual { - t.Fatal("expected: POST but got:" + actual) - } -} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go index e11c8162a7..e8793304b1 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -19,8 +19,30 @@ const ( // FormParameterKind = indicator of Request parameter type "form" FormParameterKind + + // CollectionFormatCSV comma separated values `foo,bar` + CollectionFormatCSV = CollectionFormat("csv") + + // CollectionFormatSSV space separated values `foo bar` + CollectionFormatSSV = CollectionFormat("ssv") + + // CollectionFormatTSV tab separated values `foo\tbar` + CollectionFormatTSV = CollectionFormat("tsv") + + // CollectionFormatPipes pipe separated values `foo|bar` + CollectionFormatPipes = CollectionFormat("pipes") + + // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single + // instance `foo=bar&foo=baz`. 
This is valid only for QueryParameters and FormParameters + CollectionFormatMulti = CollectionFormat("multi") ) +type CollectionFormat string + +func (cf CollectionFormat) String() string { + return string(cf) +} + // Parameter is for documenting the parameter used in a Http Request // ParameterData kinds are Path,Query and Body type Parameter struct { @@ -36,6 +58,7 @@ type ParameterData struct { AllowableValues map[string]string AllowMultiple bool DefaultValue string + CollectionFormat string } // Data returns the state of the Parameter @@ -112,3 +135,9 @@ func (p *Parameter) Description(doc string) *Parameter { p.data.Description = doc return p } + +// CollectionFormat sets the collection format for an array type +func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { + p.data.CollectionFormat = format.String() + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go index a921e6f224..95a9a25450 100644 --- a/vendor/github.com/emicklei/go-restful/path_expression.go +++ b/vendor/github.com/emicklei/go-restful/path_expression.go @@ -14,8 +14,9 @@ import ( // PathExpression holds a compiled path expression (RegExp) needed to match against // Http request paths and to extract path parameter values. type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarCount int // the number of named parameters (enclosed by {}) in the path + LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) + VarNames []string // the names of parameters (enclosed by {}) in the path + VarCount int // the number of named parameters (enclosed by {}) in the path Matcher *regexp.Regexp Source string // Path as defined by the RouteBuilder tokens []string @@ -24,16 +25,16 @@ type pathExpression struct { // NewPathExpression creates a PathExpression from the input URL path. // Returns an error if the path is invalid.
func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varCount, tokens := templateToRegularExpression(path) + expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) compiled, err := regexp.Compile(expression) if err != nil { return nil, err } - return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil + return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil } // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) { +func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { var buffer bytes.Buffer buffer.WriteString("^") //tokens = strings.Split(template, "/") @@ -46,8 +47,10 @@ func templateToRegularExpression(template string) (expression string, literalCou if strings.HasPrefix(each, "{") { // check for regular expression in variable colon := strings.Index(each, ":") + var varName string if colon != -1 { // extract expression + varName = strings.TrimSpace(each[1:colon]) paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) if paramExpr == "*" { // special case buffer.WriteString("(.*)") @@ -56,8 +59,10 @@ func templateToRegularExpression(template string) (expression string, literalCou } } else { // plain var + varName = strings.TrimSpace(each[1 : len(each)-1]) buffer.WriteString("([^/]+?)") } + varNames = append(varNames, varName) varCount += 1 } else { literalCount += len(each) @@ -65,5 +70,5 @@ func templateToRegularExpression(template string) (expression string, literalCou buffer.WriteString(regexp.QuoteMeta(encoded)) } } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens } diff --git a/vendor/github.com/emicklei/go-restful/path_expression_test.go b/vendor/github.com/emicklei/go-restful/path_expression_test.go deleted file mode 100644 index 334fcef739..0000000000 --- a/vendor/github.com/emicklei/go-restful/path_expression_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package restful - -import "testing" - -var tempregexs = []struct { - template, regex string - literalCount, varCount int -}{ - {"", "^(/.*)?$", 0, 0}, - {"/a/{b}/c/", "^/a/([^/]+?)/c(/.*)?$", 2, 1}, - {"/{a}/{b}/{c-d-e}/", "^/([^/]+?)/([^/]+?)/([^/]+?)(/.*)?$", 0, 3}, - {"/{p}/abcde", "^/([^/]+?)/abcde(/.*)?$", 5, 1}, - {"/a/{b:*}", "^/a/(.*)(/.*)?$", 1, 1}, - {"/a/{b:[a-z]+}", "^/a/([a-z]+)(/.*)?$", 1, 1}, -} - -func TestTemplateToRegularExpression(t *testing.T) { - ok := true - for i, fixture := range tempregexs { - actual, lCount, vCount, _ := templateToRegularExpression(fixture.template) - if actual != fixture.regex { - t.Logf("regex mismatch, expected:%v , actual:%v, line:%v\n", fixture.regex, actual, i) // 11 = where the data starts - ok = false - } - if lCount != fixture.literalCount { - t.Logf("literal count mismatch, expected:%v , actual:%v, line:%v\n", fixture.literalCount, lCount, i) - ok = false - } - if vCount != fixture.varCount { - t.Logf("variable count mismatch, expected:%v , actual:%v, line:%v\n", fixture.varCount, vCount, i) - ok = false - } - } - if !ok { - t.Fatal("one or more expression did not match") - } -} diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go 
b/vendor/github.com/emicklei/go-restful/path_processor.go new file mode 100644 index 0000000000..357c723a7a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/path_processor.go @@ -0,0 +1,63 @@ +package restful + +import ( + "bytes" + "strings" +) + +// Copyright 2018 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. +// If a Router does not implement this interface then the default behaviour will be used. +type PathProcessor interface { + // ExtractParameters gets the path parameters defined in the route and webService from the urlPath + ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string +} + +type defaultPathProcessor struct{} + +// Extract the parameters from the request url path +func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { + urlParts := tokenizePath(urlPath) + pathParameters := map[string]string{} + for i, key := range r.pathParts { + var value string + if i >= len(urlParts) { + value = "" + } else { + value = urlParts[i] + } + if strings.HasPrefix(key, "{") { // path-parameter + if colon := strings.Index(key, ":"); colon != -1 { + // extract by regex + regPart := key[colon+1 : len(key)-1] + keyPart := key[1:colon] + if regPart == "*" { + pathParameters[keyPart] = untokenizePath(i, urlParts) + break + } else { + pathParameters[keyPart] = value + } + } else { + // without enclosing {} + pathParameters[key[1:len(key)-1]] = value + } + } + } + return pathParameters +} + +// Untokenize back into a URL path using the slash separator +func untokenizePath(offset int, parts []string) string { + var buffer bytes.Buffer + for p := offset; p < len(parts); p++ { + buffer.WriteString(parts[p]) + // do not end with a slash + if p < len(parts)-1 { + buffer.WriteString("/") + } + } + return buffer.String() +} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go index 8c23af12c0..a20730febf 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -51,6 +51,11 @@ func (r *Request) QueryParameter(name string) string { return r.Request.FormValue(name) } +// QueryParameters returns all the query parameter values by name +func (r *Request) QueryParameters(name string) []string { + return r.Request.URL.Query()[name] +} + // BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. 
func (r *Request) BodyParameter(name string) (string, error) { err := r.Request.ParseForm() diff --git a/vendor/github.com/emicklei/go-restful/request_test.go b/vendor/github.com/emicklei/go-restful/request_test.go deleted file mode 100644 index 31f5096594..0000000000 --- a/vendor/github.com/emicklei/go-restful/request_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package restful - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - "strings" - "testing" -) - -func TestQueryParameter(t *testing.T) { - hreq := http.Request{Method: "GET"} - hreq.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar") - rreq := Request{Request: &hreq} - if rreq.QueryParameter("q") != "foo" { - t.Errorf("q!=foo %#v", rreq) - } -} - -type Anything map[string]interface{} - -type Number struct { - ValueFloat float64 - ValueInt int64 -} - -type Sample struct { - Value string -} - -func TestReadEntityJson(t *testing.T) { - bodyReader := strings.NewReader(`{"Value" : "42"}`) - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/json") - request := &Request{Request: httpRequest} - sam := new(Sample) - request.ReadEntity(sam) - if sam.Value != "42" { - t.Fatal("read failed") - } -} - -func TestReadEntityJsonCharset(t *testing.T) { - bodyReader := strings.NewReader(`{"Value" : "42"}`) - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/json; charset=UTF-8") - request := NewRequest(httpRequest) - sam := new(Sample) - request.ReadEntity(sam) - if sam.Value != "42" { - t.Fatal("read failed") - } -} - -func TestReadEntityJsonNumber(t *testing.T) { - bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`) - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/json") - request := &Request{Request: httpRequest} - any := make(Anything) - request.ReadEntity(&any) - number, ok := any["Value"].(json.Number) - if !ok { - t.Fatal("read failed") - } - vint, err := number.Int64() - if err != nil { - t.Fatal("convert failed") - } - if vint != 4899710515899924123 { - t.Fatal("read failed") - } - vfloat, err := number.Float64() - if err != nil { - t.Fatal("convert failed") - } - // match the default behaviour - vstring := strconv.FormatFloat(vfloat, 'e', 15, 64) - if vstring != "4.899710515899924e+18" { - t.Fatal("convert float64 failed") - } -} - -func TestReadEntityJsonLong(t *testing.T) { - bodyReader := strings.NewReader(`{"ValueFloat" : 4899710515899924123, "ValueInt": 4899710515899924123}`) - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/json") - request := &Request{Request: httpRequest} - number := new(Number) - request.ReadEntity(&number) - if number.ValueInt != 4899710515899924123 { - t.Fatal("read failed") - } - // match the default behaviour - vstring := strconv.FormatFloat(number.ValueFloat, 'e', 15, 64) - if vstring != "4.899710515899924e+18" { - t.Fatal("convert float64 failed") - } -} - -func TestBodyParameter(t *testing.T) { - bodyReader := strings.NewReader(`value1=42&value2=43`) - httpRequest, _ := http.NewRequest("POST", "/test?value1=44", bodyReader) // POST and PUT body parameters take precedence over URL query string - httpRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8") - request := NewRequest(httpRequest) - v1, err := request.BodyParameter("value1") - if err != nil { 
- t.Error(err) - } - v2, err := request.BodyParameter("value2") - if err != nil { - t.Error(err) - } - if v1 != "42" || v2 != "43" { - t.Fatal("read failed") - } -} - -func TestReadEntityUnkown(t *testing.T) { - bodyReader := strings.NewReader("?") - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - httpRequest.Header.Set("Content-Type", "application/rubbish") - request := NewRequest(httpRequest) - sam := new(Sample) - err := request.ReadEntity(sam) - if err == nil { - t.Fatal("read should be in error") - } -} - -func TestSetAttribute(t *testing.T) { - bodyReader := strings.NewReader("?") - httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) - request := NewRequest(httpRequest) - request.SetAttribute("go", "there") - there := request.Attribute("go") - if there != "there" { - t.Fatalf("missing request attribute:%v", there) - } -} diff --git a/vendor/github.com/emicklei/go-restful/response_test.go b/vendor/github.com/emicklei/go-restful/response_test.go deleted file mode 100644 index 0587c40b40..0000000000 --- a/vendor/github.com/emicklei/go-restful/response_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package restful - -import ( - "errors" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestWriteHeader(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - resp.WriteHeader(123) - if resp.StatusCode() != 123 { - t.Errorf("Unexpected status code:%d", resp.StatusCode()) - } -} - -func TestNoWriteHeader(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - if resp.StatusCode() != http.StatusOK { - t.Errorf("Unexpected status code:%d", resp.StatusCode()) - } -} - -type food struct { - Kind string -} - -// go test -v -test.run TestMeasureContentLengthXml ...restful -func TestMeasureContentLengthXml(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - resp.WriteAsXml(food{"apple"}) - if resp.ContentLength() != 76 { - t.Errorf("Incorrect measured length:%d", resp.ContentLength()) - } -} - -// go test -v -test.run TestMeasureContentLengthJson ...restful -func TestMeasureContentLengthJson(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - resp.WriteAsJson(food{"apple"}) - if resp.ContentLength() != 22 { - t.Errorf("Incorrect measured length:%d", resp.ContentLength()) - } -} - -// go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful -func TestMeasureContentLengthJsonNotPretty(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}} - resp.WriteAsJson(food{"apple"}) - if resp.ContentLength() != 17 { // 16+1 using the Encoder directly yields another /n - t.Errorf("Incorrect measured length:%d", resp.ContentLength()) - } -} - -// go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful -func TestMeasureContentLengthWriteErrorString(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - 
resp.WriteErrorString(404, "Invalid") - if resp.ContentLength() != len("Invalid") { - t.Errorf("Incorrect measured length:%d", resp.ContentLength()) - } -} - -// go test -v -test.run TestStatusIsPassedToResponse ...restful -func TestStatusIsPassedToResponse(t *testing.T) { - for _, each := range []struct { - write, read int - }{ - {write: 204, read: 204}, - {write: 304, read: 304}, - {write: 200, read: 200}, - {write: 400, read: 400}, - } { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true} - resp.WriteHeader(each.write) - if got, want := httpWriter.Code, each.read; got != want { - t.Errorf("got %v want %v", got, want) - } - } -} - -// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful -func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true} - resp.WriteHeader(201) - resp.WriteAsJson(food{"Juicy"}) - if httpWriter.HeaderMap.Get("Content-Type") != "application/json" { - t.Errorf("Expected content type json but got:%s", httpWriter.HeaderMap.Get("Content-Type")) - } - if httpWriter.Code != 201 { - t.Errorf("Expected status 201 but got:%d", httpWriter.Code) - } -} - -type errorOnWriteRecorder struct { - *httptest.ResponseRecorder -} - -func (e errorOnWriteRecorder) Write(bytes []byte) (int, error) { - return 0, errors.New("fail") -} - -// go test -v -test.run TestLastWriteErrorCaught ...restful -func TestLastWriteErrorCaught(t *testing.T) { - httpWriter := errorOnWriteRecorder{httptest.NewRecorder()} - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true} - err := resp.WriteAsJson(food{"Juicy"}) - if err.Error() != "fail" { - t.Errorf("Unexpected error message:%v", err) - } -} - -// go test -v -test.run TestAcceptStarStar_Issue83 ...restful -func TestAcceptStarStar_Issue83(t *testing.T) { - httpWriter := httptest.NewRecorder() - // Accept Produces - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true} - resp.WriteEntity(food{"Juicy"}) - ct := httpWriter.Header().Get("Content-Type") - if "application/json" != ct { - t.Errorf("Unexpected content type:%s", ct) - } -} - -// go test -v -test.run TestAcceptSkipStarStar_Issue83 ...restful -func TestAcceptSkipStarStar_Issue83(t *testing.T) { - httpWriter := httptest.NewRecorder() - // Accept Produces - resp := Response{ResponseWriter: httpWriter, requestAccept: " application/xml ,*/* ; q=0.8", routeProduces: []string{"application/json", "application/xml"}, prettyPrint: true} - resp.WriteEntity(food{"Juicy"}) - ct := httpWriter.Header().Get("Content-Type") - if "application/xml" != ct { - t.Errorf("Unexpected content type:%s", ct) - } -} - -// go test -v -test.run TestAcceptXmlBeforeStarStar_Issue83 ...restful -func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) { - httpWriter := httptest.NewRecorder() - // Accept Produces - resp := Response{ResponseWriter: httpWriter, requestAccept: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true} - resp.WriteEntity(food{"Juicy"}) - ct := httpWriter.Header().Get("Content-Type") - if "application/json" != ct { - 
t.Errorf("Unexpected content type:%s", ct) - } -} - -// go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful -func TestWriteHeaderNoContent_Issue124(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "text/plain", routeProduces: []string{"text/plain"}, prettyPrint: true} - resp.WriteHeader(http.StatusNoContent) - if httpWriter.Code != http.StatusNoContent { - t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent) - } -} - -// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful -func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true} - resp.WriteHeader(http.StatusNotModified) - if httpWriter.Code != http.StatusNotModified { - t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified) - } -} - -func TestWriteHeaderAndEntity_Issue235(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true} - var pong = struct { - Foo string `json:"foo"` - }{Foo: "123"} - resp.WriteHeaderAndEntity(404, pong) - if httpWriter.Code != http.StatusNotFound { - t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent) - } - if got, want := httpWriter.Header().Get("Content-Type"), "application/json"; got != want { - t.Errorf("got %v want %v", got, want) - } - if !strings.HasPrefix(httpWriter.Body.String(), "{") { - t.Errorf("expected pong struct in json:%s", httpWriter.Body.String()) - } -} - -func TestWriteEntityNoAcceptMatchWithProduces(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{"application/json"}, prettyPrint: true} - resp.WriteEntity("done") - if httpWriter.Code != http.StatusOK { - t.Errorf("got %d want %d", httpWriter.Code, http.StatusOK) - } -} - -func TestWriteEntityNoAcceptMatchNoProduces(t *testing.T) { - httpWriter := httptest.NewRecorder() - resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{}, prettyPrint: true} - resp.WriteEntity("done") - if httpWriter.Code != http.StatusNotAcceptable { - t.Errorf("got %d want %d", httpWriter.Code, http.StatusNotAcceptable) - } -} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index 9d5b156e0f..6d15dbf66f 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -5,7 +5,6 @@ package restful // that can be found in the LICENSE file. import ( - "bytes" "net/http" "strings" ) @@ -39,10 +38,17 @@ type Route struct { Operation string ParameterDocs []*Parameter ResponseErrors map[int]ResponseError + DefaultResponse *ResponseError ReadSample, WriteSample interface{} // structs that model an example request or response payload // Extra information used to store custom information about the route. 
Metadata map[string]interface{} + + // marks a route as deprecated + Deprecated bool + + //Overrides the container.contentEncodingEnabled + contentEncodingEnabled *bool } // Initialize for Route @@ -51,10 +57,9 @@ func (r *Route) postBuild() { } // Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - params := r.extractParameters(httpRequest.URL.Path) +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = params + wrappedRequest.pathParameters = pathParams wrappedRequest.selectedRoutePath = r.Path wrappedResponse := NewResponse(httpWriter) wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) @@ -73,28 +78,36 @@ func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Re } } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + // Return whether the mimeType matches to what this Route can produce. func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - parts := strings.Split(mimeTypesWithQuality, ",") - for _, each := range parts { - var withoutQuality string - if strings.Contains(each, ";") { - withoutQuality = strings.Split(each, ";")[0] + remaining := mimeTypesWithQuality + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" } else { - withoutQuality = each + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] } - // trim before compare - withoutQuality = strings.Trim(withoutQuality, " ") - if withoutQuality == "*/*" { + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + if mimeType == "*/*" { return true } for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == withoutQuality { + if producibleType == "*/*" || producibleType == mimeType { return true } } + if len(remaining) == 0 { + return false + } } - return false } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
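+// Editor's note: the rewritten matchesAccept above (and matchesContentType
+// below) walks the header string in place instead of using strings.Split,
+// avoiding a slice allocation per request. For an Accept header such as
+//
+//	"text/html, application/xml;q=0.9, */*;q=0.8"
+//
+// it inspects "text/html", then "application/xml" (the ";q=..." suffix is cut
+// off before comparison), then "*/*", returning true on the first match with
+// a producible type.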
@@ -115,73 +128,33 @@ func (r Route) matchesContentType(mimeTypes string) bool { mimeTypes = MIME_OCTET } - parts := strings.Split(mimeTypes, ",") - for _, each := range parts { - var contentType string - if strings.Contains(each, ";") { - contentType = strings.Split(each, ";")[0] + remaining := mimeTypes + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" } else { - contentType = each + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] } - // trim before compare - contentType = strings.Trim(contentType, " ") + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == contentType { + if consumeableType == "*/*" || consumeableType == mimeType { return true } } - } - return false -} - -// Extract the parameters from the request url path -func (r Route) extractParameters(urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } + if len(remaining) == 0 { + return false } } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() } // Tokenize an URL path using the slash separator ; the result does not have empty tokens func tokenizePath(path string) []string { if "/" == path { - return []string{} + return nil } return strings.Split(strings.Trim(path, "/"), "/") } @@ -190,3 +163,8 @@ func tokenizePath(path string) []string { func (r Route) String() string { return r.Method + " " + r.Path } + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. 
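+// Editor's note (an inference from the *bool field added to Route, not a
+// documented guarantee): storing a pointer lets a route distinguish an
+// explicit true/false set via EnableContentEncoding below from "unset", in
+// which case the container-wide content encoding setting still applies.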
+func (r Route) EnableContentEncoding(enabled bool) { + r.contentEncodingEnabled = &enabled +} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go index 83db02b7c6..8dfdf934bf 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -24,7 +24,7 @@ type RouteBuilder struct { httpMethod string // required function RouteFunction // required filters []FilterFunction - conditions []RouteSelectionConditionFunction + conditions []RouteSelectionConditionFunction typeNameHandleFunc TypeNameHandleFunction // required @@ -35,7 +35,9 @@ type RouteBuilder struct { readSample, writeSample interface{} parameters []*Parameter errorMap map[int]ResponseError + defaultResponse *ResponseError metadata map[string]interface{} + deprecated bool } // Do evaluates each argument with the RouteBuilder itself. @@ -98,15 +100,18 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder { // Reads tells what resource type will be read from the request payload. Optional. // A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { +func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { fn := b.typeNameHandleFunc if fn == nil { fn = reflectTypeName } typeAsName := fn(sample) - + description := "" + if len(optionalDescription) > 0 { + description = optionalDescription[0] + } b.readSample = sample - bodyParameter := &Parameter{&ParameterData{Name: "body"}} + bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} bodyParameter.beBody() bodyParameter.Required(true) bodyParameter.DataType(typeAsName) @@ -160,7 +165,7 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou Code: code, Message: message, Model: model, - IsDefault: false, + IsDefault: false, // this field is deprecated, use default response instead. } // lazy init because there is no NewRouteBuilder (yet) if b.errorMap == nil { @@ -170,17 +175,11 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou return b } -// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero. +// DefaultReturns is a special Returns call that sets the default of the response. func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { - b.Returns(0, message, model) - // Modify the ResponseError just added/updated - re := b.errorMap[0] - // errorMap is initialized - b.errorMap[0] = ResponseError{ - Code: re.Code, - Message: re.Message, - Model: re.Model, - IsDefault: true, + b.defaultResponse = &ResponseError{ + Message: message, + Model: model, } return b } @@ -194,6 +193,12 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use +func (b *RouteBuilder) Deprecate() *RouteBuilder { + b.deprecated = true + return b +} + // ResponseError represents a response; not necessarily an error. 
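+// Editor's note: a hedged sketch combining the new RouteBuilder options from
+// this diff; the service, the Thing/Error types and the createThing handler
+// are hypothetical:
+//
+//	ws.Route(ws.POST("/things").
+//		To(createThing).
+//		Reads(Thing{}, "the thing to create").       // optional body description
+//		DefaultReturns("unexpected error", Error{}). // stored as Route.DefaultResponse
+//		Deprecate())                                 // sets Route.Deprecated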
type ResponseError struct { Code int @@ -251,11 +256,11 @@ func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBui func (b *RouteBuilder) Build() Route { pathExpr, err := newPathExpression(b.currentPath) if err != nil { - log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) + log.Printf("Invalid path:%s because:%v", b.currentPath, err) os.Exit(1) } if b.function == nil { - log.Printf("[restful] No function specified for route:" + b.currentPath) + log.Printf("No function specified for route:" + b.currentPath) os.Exit(1) } operationName := b.operation @@ -264,23 +269,25 @@ func (b *RouteBuilder) Build() Route { operationName = nameOfFunction(b.function) } route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata} + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata, + Deprecated: b.deprecated} route.postBuild() return route } diff --git a/vendor/github.com/emicklei/go-restful/route_builder_test.go b/vendor/github.com/emicklei/go-restful/route_builder_test.go deleted file mode 100644 index 25881d5eb8..0000000000 --- a/vendor/github.com/emicklei/go-restful/route_builder_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package restful - -import ( - "testing" - "time" -) - -func TestRouteBuilder_PathParameter(t *testing.T) { - p := &Parameter{&ParameterData{Name: "name", Description: "desc"}} - p.AllowMultiple(true) - p.DataType("int") - p.Required(true) - values := map[string]string{"a": "b"} - p.AllowableValues(values) - p.bePath() - - b := new(RouteBuilder) - b.function = dummy - b.Param(p) - r := b.Build() - if !r.ParameterDocs[0].Data().AllowMultiple { - t.Error("AllowMultiple invalid") - } - if r.ParameterDocs[0].Data().DataType != "int" { - t.Error("dataType invalid") - } - if !r.ParameterDocs[0].Data().Required { - t.Error("required invalid") - } - if r.ParameterDocs[0].Data().Kind != PathParameterKind { - t.Error("kind invalid") - } - if r.ParameterDocs[0].Data().AllowableValues["a"] != "b" { - t.Error("allowableValues invalid") - } - if b.ParameterNamed("name") == nil { - t.Error("access to parameter failed") - } -} - -func TestRouteBuilder(t *testing.T) { - json := "application/json" - b := new(RouteBuilder) - b.To(dummy) - b.Path("/routes").Method("HEAD").Consumes(json).Produces(json).Metadata("test", "test-value").DefaultReturns("default", time.Now()) - r := b.Build() - if r.Path != "/routes" { - t.Error("path invalid") - } - if r.Produces[0] != json { - t.Error("produces invalid") - } - if r.Consumes[0] != json { - t.Error("consumes invalid") - } - if r.Operation != "dummy" { - t.Error("Operation not set") - } - if r.Metadata["test"] != "test-value" { - t.Errorf("Metadata not set") - } - if _, ok := 
r.ResponseErrors[0]; !ok { - t.Fatal("expected default response") - } -} - -func TestAnonymousFuncNaming(t *testing.T) { - f1 := func() {} - f2 := func() {} - if got, want := nameOfFunction(f1), "func1"; got != want { - t.Errorf("got %v want %v", got, want) - } - if got, want := nameOfFunction(f2), "func2"; got != want { - t.Errorf("got %v want %v", got, want) - } -} diff --git a/vendor/github.com/emicklei/go-restful/route_test.go b/vendor/github.com/emicklei/go-restful/route_test.go deleted file mode 100644 index 6a104aff8e..0000000000 --- a/vendor/github.com/emicklei/go-restful/route_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package restful - -import ( - "testing" -) - -// accept should match produces -func TestMatchesAcceptPlainTextWhenProducePlainTextAsLast(t *testing.T) { - r := Route{Produces: []string{"application/json", "text/plain"}} - if !r.matchesAccept("text/plain") { - t.Errorf("accept should match text/plain") - } -} - -// accept should match produces -func TestMatchesAcceptStar(t *testing.T) { - r := Route{Produces: []string{"application/xml"}} - if !r.matchesAccept("*/*") { - t.Errorf("accept should match star") - } -} - -// accept should match produces -func TestMatchesAcceptIE(t *testing.T) { - r := Route{Produces: []string{"application/xml"}} - if !r.matchesAccept("text/html, application/xhtml+xml, */*") { - t.Errorf("accept should match star") - } -} - -// accept should match produces -func TestMatchesAcceptXml(t *testing.T) { - r := Route{Produces: []string{"application/xml"}} - if r.matchesAccept("application/json") { - t.Errorf("accept should not match json") - } - if !r.matchesAccept("application/xml") { - t.Errorf("accept should match xml") - } -} - -// accept should match produces -func TestMatchesAcceptAny(t *testing.T) { - r := Route{Produces: []string{"*/*"}} - if !r.matchesAccept("application/json") { - t.Errorf("accept should match json") - } - if !r.matchesAccept("application/xml") { - t.Errorf("accept should match xml") - } -} - -// content type should match consumes -func TestMatchesContentTypeXml(t *testing.T) { - r := Route{Consumes: []string{"application/xml"}} - if r.matchesContentType("application/json") { - t.Errorf("accept should not match json") - } - if !r.matchesContentType("application/xml") { - t.Errorf("accept should match xml") - } -} - -// content type should match consumes -func TestMatchesContentTypeCharsetInformation(t *testing.T) { - r := Route{Consumes: []string{"application/json"}} - if !r.matchesContentType("application/json; charset=UTF-8") { - t.Errorf("matchesContentType should ignore charset information") - } -} - -func TestMatchesPath_OneParam(t *testing.T) { - params := doExtractParams("/from/{source}", 2, "/from/here", t) - if params["source"] != "here" { - t.Errorf("parameter mismatch here") - } -} - -func TestMatchesPath_Slash(t *testing.T) { - params := doExtractParams("/", 0, "/", t) - if len(params) != 0 { - t.Errorf("expected empty parameters") - } -} - -func TestMatchesPath_SlashNonVar(t *testing.T) { - params := doExtractParams("/any", 1, "/any", t) - if len(params) != 0 { - t.Errorf("expected empty parameters") - } -} - -func TestMatchesPath_TwoVars(t *testing.T) { - params := doExtractParams("/from/{source}/to/{destination}", 4, "/from/AMS/to/NY", t) - if params["source"] != "AMS" { - t.Errorf("parameter mismatch AMS") - } -} - -func TestMatchesPath_VarOnFront(t *testing.T) { - params := doExtractParams("{what}/from/{source}/", 3, "who/from/SOS/", t) - if params["source"] != "SOS" { - 
t.Errorf("parameter mismatch SOS") - } -} - -func TestExtractParameters_EmptyValue(t *testing.T) { - params := doExtractParams("/fixed/{var}", 2, "/fixed/", t) - if params["var"] != "" { - t.Errorf("parameter mismatch var") - } -} - -func TestTokenizePath(t *testing.T) { - if len(tokenizePath("/")) != 0 { - t.Errorf("not empty path tokens") - } -} - -func doExtractParams(routePath string, size int, urlPath string, t *testing.T) map[string]string { - r := Route{Path: routePath} - r.postBuild() - if len(r.pathParts) != size { - t.Fatalf("len not %v %v, but %v", size, r.pathParts, len(r.pathParts)) - } - return r.extractParameters(urlPath) -} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go index 9b32fb6753..19078af1c0 100644 --- a/vendor/github.com/emicklei/go-restful/router.go +++ b/vendor/github.com/emicklei/go-restful/router.go @@ -7,6 +7,8 @@ package restful import "net/http" // A RouteSelector finds the best matching Route given the input HTTP Request +// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the +// path parameters after the route has been selected. type RouteSelector interface { // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. diff --git a/vendor/github.com/emicklei/go-restful/tracer_test.go b/vendor/github.com/emicklei/go-restful/tracer_test.go deleted file mode 100644 index 60c1e9fc09..0000000000 --- a/vendor/github.com/emicklei/go-restful/tracer_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package restful - -import "testing" - -// Use like this: -// -// TraceLogger(testLogger{t}) -type testLogger struct { - t *testing.T -} - -func (l testLogger) Print(v ...interface{}) { - l.t.Log(v...) -} - -func (l testLogger) Printf(format string, v ...interface{}) { - l.t.Logf(format, v...) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index 094c0a02ab..77ba9a8cfc 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -60,7 +60,7 @@ func reflectTypeName(sample interface{}) string { func (w *WebService) compilePathExpression() { compiled, err := newPathExpression(w.rootPath) if err != nil { - log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) + log.Printf("invalid path:%s because:%v", w.rootPath, err) os.Exit(1) } w.pathExpr = compiled @@ -118,7 +118,7 @@ func (w *WebService) QueryParameter(name, description string) *Parameter { // QueryParameter creates a new Parameter of kind Query for documentation purposes. // It is initialized as not required with string as its DataType. 
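+// Editor's note: with the change below, query parameters are now created with
+// a default CollectionFormat of "csv", so multi-valued parameters are
+// documented as comma-separated values by default.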
func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} p.beQuery() return p } diff --git a/vendor/github.com/emicklei/go-restful/web_service_test.go b/vendor/github.com/emicklei/go-restful/web_service_test.go deleted file mode 100644 index 734938134f..0000000000 --- a/vendor/github.com/emicklei/go-restful/web_service_test.go +++ /dev/null @@ -1,343 +0,0 @@ -package restful - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -const ( - pathGetFriends = "/get/{userId}/friends" -) - -func TestParameter(t *testing.T) { - p := &Parameter{&ParameterData{Name: "name", Description: "desc"}} - p.AllowMultiple(true) - p.DataType("int") - p.Required(true) - values := map[string]string{"a": "b"} - p.AllowableValues(values) - p.bePath() - - ws := new(WebService) - ws.Param(p) - if ws.pathParameters[0].Data().Name != "name" { - t.Error("path parameter (or name) invalid") - } -} -func TestWebService_CanCreateParameterKinds(t *testing.T) { - ws := new(WebService) - if ws.BodyParameter("b", "b").Kind() != BodyParameterKind { - t.Error("body parameter expected") - } - if ws.PathParameter("p", "p").Kind() != PathParameterKind { - t.Error("path parameter expected") - } - if ws.QueryParameter("q", "q").Kind() != QueryParameterKind { - t.Error("query parameter expected") - } -} - -func TestCapturePanic(t *testing.T) { - tearDown() - Add(newPanicingService()) - httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - // override the default here - DefaultContainer.DoNotRecover(false) - DefaultContainer.dispatch(httpWriter, httpRequest) - if 500 != httpWriter.Code { - t.Error("500 expected on fire") - } -} - -func TestCapturePanicWithEncoded(t *testing.T) { - tearDown() - Add(newPanicingService()) - DefaultContainer.EnableContentEncoding(true) - httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil) - httpRequest.Header.Set("Accept", "*/*") - httpRequest.Header.Set("Accept-Encoding", "gzip") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 500 != httpWriter.Code { - t.Error("500 expected on fire, got", httpWriter.Code) - } -} - -func TestNotFound(t *testing.T) { - tearDown() - httpRequest, _ := http.NewRequest("GET", "http://here.com/missing", nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 404 != httpWriter.Code { - t.Error("404 expected on missing") - } -} - -func TestMethodNotAllowed(t *testing.T) { - tearDown() - Add(newGetOnlyService()) - httpRequest, _ := http.NewRequest("POST", "http://here.com/get", nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 405 != httpWriter.Code { - t.Error("405 expected method not allowed") - } -} - -func TestSelectedRoutePath_Issue100(t *testing.T) { - tearDown() - Add(newSelectedRouteTestingService()) - httpRequest, _ := http.NewRequest("GET", "http://here.com/get/232452/friends", nil) - httpRequest.Header.Set("Accept", "*/*") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 
http.StatusOK != httpWriter.Code { - t.Error(http.StatusOK, "expected,", httpWriter.Code, "received.") - } -} - -func TestContentType415_Issue170(t *testing.T) { - tearDown() - Add(newGetOnlyJsonOnlyService()) - httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 200 != httpWriter.Code { - t.Errorf("Expected 200, got %d", httpWriter.Code) - } -} - -func TestNoContentTypePOST(t *testing.T) { - tearDown() - Add(newPostNoConsumesService()) - httpRequest, _ := http.NewRequest("POST", "http://here.com/post", nil) - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 204 != httpWriter.Code { - t.Errorf("Expected 204, got %d", httpWriter.Code) - } -} - -func TestContentType415_POST_Issue170(t *testing.T) { - tearDown() - Add(newPostOnlyJsonOnlyService()) - httpRequest, _ := http.NewRequest("POST", "http://here.com/post", nil) - httpRequest.Header.Set("Content-Type", "application/json") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 200 != httpWriter.Code { - t.Errorf("Expected 200, got %d", httpWriter.Code) - } -} - -// go test -v -test.run TestContentType406PlainJson ...restful -func TestContentType406PlainJson(t *testing.T) { - tearDown() - TraceLogger(testLogger{t}) - Add(newGetPlainTextOrJsonService()) - httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) - httpRequest.Header.Set("Accept", "text/plain") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 200; got != want { - t.Errorf("got %v, want %v", got, want) - } -} - -func TestRemoveRoute(t *testing.T) { - tearDown() - TraceLogger(testLogger{t}) - ws := newGetPlainTextOrJsonService() - Add(ws) - httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) - httpRequest.Header.Set("Accept", "text/plain") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 200; got != want { - t.Errorf("got %v, want %v", got, want) - } - - // dynamic apis are disabled, should error and do nothing - if err := ws.RemoveRoute("/get", "GET"); err == nil { - t.Error("unexpected non-error") - } - - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 200; got != want { - t.Errorf("got %v, want %v", got, want) - } - - ws.SetDynamicRoutes(true) - if err := ws.RemoveRoute("/get", "GET"); err != nil { - t.Errorf("unexpected error %v", err) - } - - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 404; got != want { - t.Errorf("got %v, want %v", got, want) - } -} -func TestRemoveLastRoute(t *testing.T) { - tearDown() - TraceLogger(testLogger{t}) - ws := newGetPlainTextOrJsonServiceMultiRoute() - Add(ws) - httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) - httpRequest.Header.Set("Accept", "text/plain") - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 200; got != want { - t.Errorf("got %v, want %v", got, want) - } - - // dynamic apis are disabled, should error and do nothing - if err := ws.RemoveRoute("/get", "GET"); err == nil { - t.Error("unexpected non-error") - } - - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, 
httpRequest) - if got, want := httpWriter.Code, 200; got != want { - t.Errorf("got %v, want %v", got, want) - } - - ws.SetDynamicRoutes(true) - if err := ws.RemoveRoute("/get", "GET"); err != nil { - t.Errorf("unexpected error %v", err) - } - - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if got, want := httpWriter.Code, 404; got != want { - t.Errorf("got %v, want %v", got, want) - } -} - -// go test -v -test.run TestContentTypeOctet_Issue170 ...restful -func TestContentTypeOctet_Issue170(t *testing.T) { - tearDown() - Add(newGetConsumingOctetStreamService()) - // with content-type - httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) - httpRequest.Header.Set("Content-Type", MIME_OCTET) - httpWriter := httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 200 != httpWriter.Code { - t.Errorf("Expected 200, got %d", httpWriter.Code) - } - // without content-type - httpRequest, _ = http.NewRequest("GET", "http://here.com/get", nil) - httpWriter = httptest.NewRecorder() - DefaultContainer.dispatch(httpWriter, httpRequest) - if 200 != httpWriter.Code { - t.Errorf("Expected 200, got %d", httpWriter.Code) - } -} - -type exampleBody struct{} - -func TestParameterDataTypeDefaults(t *testing.T) { - tearDown() - ws := new(WebService) - route := ws.POST("/post").Reads(&exampleBody{}) - if route.parameters[0].data.DataType != "*restful.exampleBody" { - t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data) - } -} - -func TestParameterDataTypeCustomization(t *testing.T) { - tearDown() - ws := new(WebService) - ws.TypeNameHandler(func(sample interface{}) string { - return "my.custom.type.name" - }) - route := ws.POST("/post").Reads(&exampleBody{}) - if route.parameters[0].data.DataType != "my.custom.type.name" { - t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data) - } -} - -func newPanicingService() *WebService { - ws := new(WebService).Path("") - ws.Route(ws.GET("/fire").To(doPanic)) - return ws -} - -func newGetOnlyService() *WebService { - ws := new(WebService).Path("") - ws.Route(ws.GET("/get").To(doPanic)) - return ws -} - -func newPostOnlyJsonOnlyService() *WebService { - ws := new(WebService).Path("") - ws.Consumes("application/json") - ws.Route(ws.POST("/post").To(doNothing)) - return ws -} - -func newGetOnlyJsonOnlyService() *WebService { - ws := new(WebService).Path("") - ws.Consumes("application/json") - ws.Route(ws.GET("/get").To(doNothing)) - return ws -} - -func newGetPlainTextOrJsonService() *WebService { - ws := new(WebService).Path("") - ws.Produces("text/plain", "application/json") - ws.Route(ws.GET("/get").To(doNothing)) - return ws -} - -func newGetPlainTextOrJsonServiceMultiRoute() *WebService { - ws := new(WebService).Path("") - ws.Produces("text/plain", "application/json") - ws.Route(ws.GET("/get").To(doNothing)) - ws.Route(ws.GET("/status").To(doNothing)) - return ws -} - -func newGetConsumingOctetStreamService() *WebService { - ws := new(WebService).Path("") - ws.Consumes("application/octet-stream") - ws.Route(ws.GET("/get").To(doNothing)) - return ws -} - -func newPostNoConsumesService() *WebService { - ws := new(WebService).Path("") - ws.Route(ws.POST("/post").To(return204)) - return ws -} - -func newSelectedRouteTestingService() *WebService { - ws := new(WebService).Path("") - ws.Route(ws.GET(pathGetFriends).To(selectedRouteChecker)) - return ws -} - -func selectedRouteChecker(req *Request, resp *Response) { - if req.SelectedRoutePath() != 
pathGetFriends { - resp.InternalServerError() - } -} - -func doPanic(req *Request, resp *Response) { - println("lightning...") - panic("fire") -} - -func doNothing(req *Request, resp *Response) { -} - -func return204(req *Request, resp *Response) { - resp.WriteHeader(204) -} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 0000000000..2092c72c46 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.7 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 0000000000..0eb9b72d84 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 0000000000..a711d7961b --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,292 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
+ +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch +``` + +**Stable Versions**: +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +There is a single global configuration variable `jsonpatch.SupportNegativeIndices`. This +defaults to `true` and enables the non-standard practice of allowing negative indices +to mean indices starting at the end of an array. This functionality can be disabled +by setting `jsonpatch.SupportNegativeIndices = false`. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now let's apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document.
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When run, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both sets of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally equivalent to merging each merge patch into the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individually against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When run, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the command-line program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and is fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 0000000000..6806c4c200 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// result as merging each merge patch into the document in succession.
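+// For example (editor's sketch, mirroring the README combine example above):
+//
+//	p1 := []byte(`{"height":null,"name":"Jane"}`)
+//	p2 := []byte(`{"age":4.23,"eyes":"blue"}`)
+//	combined, _ := MergeMergePatches(p1, p2)
+//	// combined == `{"age":4.23,"eyes":"blue","height":null,"name":"Jane"}`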
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document.
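+// For example (editor's sketch): createObjectMergePatch([]byte(`{"a":1,"b":2}`),
+// []byte(`{"a":1}`)) yields `{"b":null}`, since removed keys are recorded as
+// explicit nulls.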
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDoc := map[string]interface{}{}
+	modifiedDoc := map[string]interface{}{}
+
+	err := json.Unmarshal(originalJSON, &originalDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	dest, err := getDiff(originalDoc, modifiedDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDocs := []json.RawMessage{}
+	modifiedDocs := []json.RawMessage{}
+
+	err := json.Unmarshal(originalJSON, &originalDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	total := len(originalDocs)
+	if len(modifiedDocs) != total {
+		return nil, errBadJSONDoc
+	}
+
+	result := []json.RawMessage{}
+	for i := 0; i < len(originalDocs); i++ {
+		original := originalDocs[i]
+		modified := modifiedDocs[i]
+
+		patch, err := createObjectMergePatch(original, modified)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, json.RawMessage(patch))
+	}
+
+	return json.Marshal(result)
+}
+
+// Returns true if the two arrays match (elements must be JSON types).
+// As is idiomatic for Go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	if (a == nil && b != nil) || (a != nil && b == nil) {
+		return false
+	}
+	for i := range a {
+		if !matchesValue(a[i], b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Returns true if the two values match (they must be JSON types).
+// The types of the values must match, otherwise it will always return false.
+// If two map[string]interface{} are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+		return false
+	}
+	switch at := av.(type) {
+	case string:
+		bt := bv.(string)
+		if bt == at {
+			return true
+		}
+	case float64:
+		bt := bv.(float64)
+		if bt == at {
+			return true
+		}
+	case bool:
+		bt := bv.(bool)
+		if bt == at {
+			return true
+		}
+	case nil:
+		// Both nil, fine.
+		return true
+	case map[string]interface{}:
+		bt := bv.(map[string]interface{})
+		for key := range at {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		for key := range bt {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		return true
+	case []interface{}:
+		bt := bv.([]interface{})
+		return matchesArray(at, bt)
+	}
+	return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
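+//
+// A small worked example with assumed inputs: for
+//
+//	a = {"a":1,"b":2,"c":{"x":1}}
+//	b = {"a":1,"c":{"x":2}}
+//
+// getDiff returns {"b":null,"c":{"x":2}}: unchanged keys are omitted, nested
+// objects are diffed recursively, and keys missing from b are mapped to nil
+// so that the merge patch deletes them.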
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 0000000000..f26b6824b6 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,682 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var SupportNegativeIndices bool = true + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. 
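+// It is decoded from an RFC 6902 JSON array such as this assumed example:
+//
+//	[
+//		{"op": "replace", "path": "/name", "value": "Jane"},
+//		{"op": "remove", "path": "/height"}
+//	]
+//
+// Each operation keeps its fields as raw JSON and is only interpreted when
+// the patch is applied.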
+type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := 
o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + ary[idx] = val + + *d = ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *container, op operation) error { + path 
:= op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + } + + return con.set(key, op.value()) +} + +func (p Patch) move(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return fmt.Errorf("Testing value %s failed", path) + } else if op.value() == nil { + return fmt.Errorf("Testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return fmt.Errorf("Testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. 
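+//
+// Apply is simply ApplyIndent with an empty indent. A minimal usage sketch,
+// with an assumed document and patch:
+//
+//	p, err := jsonpatch.DecodePatch([]byte(`[{"op": "replace", "path": "/name", "value": "Jane"}]`))
+//	if err != nil {
+//		// handle the error
+//	}
+//	out, err := p.Apply([]byte(`{"name": "John"}`))
+//	// out is {"name":"Jane"}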
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml index 3a5c933bc0..981d1bb813 100644 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -2,12 +2,14 @@ sudo: false language: go go: - - 1.6.3 + - 1.8.x + - 1.9.x - tip matrix: allow_failures: - go: tip + fast_finish: true before_script: - go get -u github.com/golang/lint/golint diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS index 0a5bf8f617..5ab5d41c54 100644 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -8,8 +8,10 @@ # Please keep the list sorted. 
+Aaron L Adrien Bustany Amit Krishnan +Anmol Sethi Bjørn Erik Pedersen Bruno Bigras Caleb Spare @@ -26,6 +28,7 @@ Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher Nathan Youngman +Nickolai Zeldovich Patrick Paul Hammond Pawel Knap @@ -33,12 +36,15 @@ Pieter Droogendijk Pursuit92 Riku Voipio Rob Figueiredo +Rodrigo Chiossi Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tom Payne Travis Cline Tudor Golubenco +Vahe Khachikyan Yukang bronze1man debrando diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index 40d7660d58..be4d7ea2c1 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + ## v1.4.2 / 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) @@ -79,7 +89,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn ## v1.0.2 / 2014-08-17 -* [Fix] Missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) ## v1.0.0 / 2014-08-15 @@ -142,7 +152,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn ## v0.9.2 / 2014-08-17 -* [Backport] Fix missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) ## v0.9.1 / 2014-06-12 @@ -161,7 +171,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn ## v0.8.11 / 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) ## v0.8.10 / 2013-10-19 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 6a81ba4890..828a60b24b 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -17,7 +17,7 @@ Please indicate that you have signed the CLA in your pull request. ### How fsnotify is Developed * Development is done on feature branches. -* Tests are run on BSD, Linux, OS X and Windows. +* Tests are run on BSD, Linux, macOS and Windows. * Pull requests are reviewed and [applied to master][am] using [hub][]. * Maintainers may modify or squash commits rather than asking contributors to. 
* To issue a new release, the maintainers will: @@ -44,7 +44,7 @@ This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/ ### Testing -fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. @@ -58,7 +58,7 @@ To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. -Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). ### Maintainers diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 3c891e349b..3993207413 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -8,14 +8,14 @@ fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather go get -u golang.org/x/sys/... ``` -Cross platform: Windows, Linux, BSD and OS X. +Cross platform: Windows, Linux, BSD and macOS. |Adapter |OS |Status | |----------|----------|----------| |inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, OS X, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| |ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| |FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| |fanotify |Linux 2.6.37+ | | |USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| @@ -23,7 +23,7 @@ Cross platform: Windows, Linux, BSD and OS X. \* Android and iOS are untested. -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information. +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. ## API stability @@ -41,6 +41,35 @@ Please refer to [CONTRIBUTING][] before opening an issue or pull request. See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). 
+ +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + [contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md ## Related Projects diff --git a/vendor/github.com/fsnotify/fsnotify/example_test.go b/vendor/github.com/fsnotify/fsnotify/example_test.go deleted file mode 100644 index 700502cb3a..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -package fsnotify_test - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func ExampleNewWatcher() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event := <-watcher.Events: - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err := <-watcher.Errors: - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index e7f55fee7a..190bf0de57 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -9,6 +9,7 @@ package fsnotify import ( "bytes" + "errors" "fmt" ) @@ -60,3 +61,6 @@ func (op Op) String() string { func (e Event) String() string { return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) } + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go deleted file mode 100644 index 9d6d72afc5..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !plan9 - -package fsnotify - -import "testing" - -func TestEventStringWithValue(t *testing.T) { - for opMask, expectedString := range map[Op]string{ - Chmod | Create: `"/usr/someFile": CREATE|CHMOD`, - Rename: `"/usr/someFile": RENAME`, - Remove: `"/usr/someFile": REMOVE`, - Write | Chmod: `"/usr/someFile": WRITE|CHMOD`, - } { - event := Event{Name: "/usr/someFile", Op: opMask} - if event.String() != expectedString { - t.Fatalf("Expected %s, got: %v", expectedString, event.String()) - } - - } -} - -func TestEventOpStringWithValue(t *testing.T) { - expectedOpString := "WRITE|CHMOD" - event := Event{Name: "someFile", Op: Write | Chmod} - if event.Op.String() != expectedOpString { - t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String()) - } -} - -func TestEventOpStringWithNoValue(t *testing.T) { - expectedOpString := "" - event := Event{Name: "testFile", Op: 0} - if event.Op.String() != expectedOpString { - t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String()) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go index f3b74c51f0..d9fd1b88a0 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -24,7 +24,6 @@ type Watcher struct { Events chan Event Errors chan error mu sync.Mutex // Map access - cv *sync.Cond // sync removing on rm_watch with IN_IGNORE fd int poller *fdPoller watches map[string]*watch // Map of inotify watches (key: path) @@ -56,7 +55,6 @@ func NewWatcher() (*Watcher, error) { done: make(chan struct{}), doneResp: make(chan struct{}), } - w.cv = sync.NewCond(&w.mu) go w.readEvents() return w, nil @@ -103,21 +101,23 @@ func (w *Watcher) Add(name string) error { var flags uint32 = agnosticEvents w.mu.Lock() - watchEntry, found := w.watches[name] - w.mu.Unlock() - if found { - watchEntry.flags |= flags - flags |= unix.IN_MASK_ADD + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD } wd, errno := unix.InotifyAddWatch(w.fd, name, flags) if wd == -1 { return errno } - w.mu.Lock() - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - w.mu.Unlock() + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } return nil } @@ -135,6 +135,13 @@ func (w *Watcher) Remove(name string) error { if !ok { return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + // inotify_rm_watch will return EINVAL if the file has been deleted; // the inotify will already have been removed. 
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously @@ -152,13 +159,6 @@ func (w *Watcher) Remove(name string) error { return errno } - // wait until ignoreLinux() deleting maps - exists := true - for exists { - w.cv.Wait() - _, exists = w.watches[name] - } - return nil } @@ -245,13 +245,31 @@ func (w *Watcher) readEvents() { mask := uint32(raw.Mask) nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + // If the event happened to the watched directory or the watched file, the kernel // doesn't append the filename to the event, but we would like to always fill the // the "Name" field with a valid filename. We retrieve the path of the watch from // the "paths" map. w.mu.Lock() - name := w.paths[int(raw.Wd)] + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } w.mu.Unlock() + if nameLen > 0 { // Point "bytes" at the first byte of the filename bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) @@ -262,7 +280,7 @@ func (w *Watcher) readEvents() { event := newEvent(name, mask) // Send the events that are not ignored on the events channel - if !event.ignoreLinux(w, raw.Wd, mask) { + if !event.ignoreLinux(mask) { select { case w.Events <- event: case <-w.done: @@ -279,15 +297,9 @@ func (w *Watcher) readEvents() { // Certain types of events can be "ignored" and not sent over the Events // channel. Such as events marked ignore by the kernel, or MODIFY events // against files that do not exist. -func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool { +func (e *Event) ignoreLinux(mask uint32) bool { // Ignore anything the inotify API says to ignore if mask&unix.IN_IGNORED == unix.IN_IGNORED { - w.mu.Lock() - defer w.mu.Unlock() - name := w.paths[int(wd)] - delete(w.paths, int(wd)) - delete(w.watches, name) - w.cv.Broadcast() return true } diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go deleted file mode 100644 index 26623efeff..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux - -package fsnotify - -import ( - "testing" - "time" - - "golang.org/x/sys/unix" -) - -type testFd [2]int - -func makeTestFd(t *testing.T) testFd { - var tfd testFd - errno := unix.Pipe(tfd[:]) - if errno != nil { - t.Fatalf("Failed to create pipe: %v", errno) - } - return tfd -} - -func (tfd testFd) fd() int { - return tfd[0] -} - -func (tfd testFd) closeWrite(t *testing.T) { - errno := unix.Close(tfd[1]) - if errno != nil { - t.Fatalf("Failed to close write end of pipe: %v", errno) - } -} - -func (tfd testFd) put(t *testing.T) { - buf := make([]byte, 10) - _, errno := unix.Write(tfd[1], buf) - if errno != nil { - t.Fatalf("Failed to write to pipe: %v", errno) - } -} - -func (tfd testFd) get(t *testing.T) { - buf := make([]byte, 10) - _, errno := unix.Read(tfd[0], buf) - if errno != nil { - t.Fatalf("Failed to read from pipe: %v", errno) - } -} - -func (tfd testFd) close() { - unix.Close(tfd[1]) - unix.Close(tfd[0]) -} - -func makePoller(t *testing.T) (testFd, *fdPoller) { - tfd := makeTestFd(t) - poller, err := newFdPoller(tfd.fd()) - if err != nil { - t.Fatalf("Failed to create poller: %v", err) - } - return tfd, poller -} - -func TestPollerWithBadFd(t *testing.T) { - _, err := newFdPoller(-1) - if err != unix.EBADF { - t.Fatalf("Expected EBADF, got: %v", err) - } -} - -func TestPollerWithData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - tfd.get(t) -} - -func TestPollerWithWakeup(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerWithClose(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.closeWrite(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } -} - -func TestPollerWithWakeupAndData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - - // both data and wakeup - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - // data is still in the buffer, wakeup is cleared - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - tfd.get(t) - // data is gone, only wakeup now - err = poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerConcurrent(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - oks := make(chan bool) - live := make(chan bool) - defer close(live) - go func() { - defer close(oks) - for { - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - oks <- ok - if !<-live { - return - } - } - }() - - // Try a write - select { - case <-time.After(50 * 
time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.put(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) - live <- true - - // Try a wakeup - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - if <-oks { - t.Fatalf("expected false") - } - live <- true - - // Try a close - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.closeWrite(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_test.go deleted file mode 100644 index a4bb202d1f..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_test.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" - "time" -) - -func TestInotifyCloseRightAway(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Close immediately; it won't even reach the first unix.Read. - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLater(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Wait until readEvents has reached unix.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - w.Add(testDir) - - // Wait until readEvents has reached unix.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseAfterRead(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add .") - } - - // Generate an event. - os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) - - // Wait for readEvents to read the event, then close the watcher. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. 
- <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func isWatcherReallyClosed(t *testing.T, w *Watcher) { - select { - case err, ok := <-w.Errors: - if ok { - t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) - } - default: - t.Fatalf("w.Errors would have blocked; readEvents is still alive!") - } - - select { - case _, ok := <-w.Events: - if ok { - t.Fatalf("w.Events is not closed; readEvents is still alive after closing") - } - default: - t.Fatalf("w.Events would have blocked; readEvents is still alive!") - } -} - -func TestInotifyCloseCreate(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - h, err := os.Create(filepath.Join(testDir, "testfile")) - if err != nil { - t.Fatalf("Failed to create file in testdir: %v", err) - } - h.Close() - select { - case _ = <-w.Events: - case err := <-w.Errors: - t.Fatalf("Error from watcher: %v", err) - case <-time.After(50 * time.Millisecond): - t.Fatalf("Took too long to wait for event") - } - - // At this point, we've received one event, so the goroutine is ready. - // It's also blocking on unix.Read. - // Now we try to swap the file descriptor under its nose. - w.Close() - w, err = NewWatcher() - defer w.Close() - if err != nil { - t.Fatalf("Failed to create second watcher: %v", err) - } - - <-time.After(50 * time.Millisecond) - err = w.Add(testDir) - if err != nil { - t.Fatalf("Error adding testDir again: %v", err) - } -} - -// This test verifies the watcher can keep up with file creations/deletions -// when under load. -func TestInotifyStress(t *testing.T) { - maxNumToCreate := 1000 - - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFilePrefix := filepath.Join(testDir, "testfile") - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - - doneChan := make(chan struct{}) - // The buffer ensures that the file generation goroutine is never blocked. - errChan := make(chan error, 2*maxNumToCreate) - - go func() { - for i := 0; i < maxNumToCreate; i++ { - testFile := fmt.Sprintf("%s%d", testFilePrefix, i) - - handle, err := os.Create(testFile) - if err != nil { - errChan <- fmt.Errorf("Create failed: %v", err) - continue - } - - err = handle.Close() - if err != nil { - errChan <- fmt.Errorf("Close failed: %v", err) - continue - } - } - - // If we delete a newly created file too quickly, inotify will skip the - // create event and only send the delete event. 
- time.Sleep(100 * time.Millisecond) - - for i := 0; i < maxNumToCreate; i++ { - testFile := fmt.Sprintf("%s%d", testFilePrefix, i) - err = os.Remove(testFile) - if err != nil { - errChan <- fmt.Errorf("Remove failed: %v", err) - } - } - - close(doneChan) - }() - - creates := 0 - removes := 0 - - finished := false - after := time.After(10 * time.Second) - for !finished { - select { - case <-after: - t.Fatalf("Not done") - case <-doneChan: - finished = true - case err := <-errChan: - t.Fatalf("Got an error from file creator goroutine: %v", err) - case err := <-w.Errors: - t.Fatalf("Got an error from watcher: %v", err) - case evt := <-w.Events: - if !strings.HasPrefix(evt.Name, testFilePrefix) { - t.Fatalf("Got an event for an unknown file: %s", evt.Name) - } - if evt.Op == Create { - creates++ - } - if evt.Op == Remove { - removes++ - } - } - } - - // Drain remaining events from channels - count := 0 - for count < 10 { - select { - case err := <-errChan: - t.Fatalf("Got an error from file creator goroutine: %v", err) - case err := <-w.Errors: - t.Fatalf("Got an error from watcher: %v", err) - case evt := <-w.Events: - if !strings.HasPrefix(evt.Name, testFilePrefix) { - t.Fatalf("Got an event for an unknown file: %s", evt.Name) - } - if evt.Op == Create { - creates++ - } - if evt.Op == Remove { - removes++ - } - count = 0 - default: - count++ - // Give the watcher chances to fill the channels. - time.Sleep(time.Millisecond) - } - } - - if creates-removes > 1 || creates-removes < -1 { - t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) - } - if creates < 50 { - t.Fatalf("Expected at least 50 creates, got %d", creates) - } -} - -func TestInotifyRemoveTwice(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testFile) - if err != nil { - t.Fatalf("Failed to add testFile: %v", err) - } - - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Failed to remove testFile: %v", err) - } - - err = w.Remove(testFile) - if err == nil { - t.Fatalf("no error on removing invalid file") - } - s1 := fmt.Sprintf("%s", err) - - err = w.Remove(testFile) - if err == nil { - t.Fatalf("no error on removing invalid file") - } - s2 := fmt.Sprintf("%s", err) - - if s1 != s2 { - t.Fatalf("receive different error - %s / %s", s1, s2) - } -} - -func TestInotifyInnerMapLength(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testFile) - if err != nil { - t.Fatalf("Failed to add testFile: %v", err) - } - go func() { - for err := range w.Errors { - t.Fatalf("error received: %s", err) - } - }() - - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Failed to remove testFile: %v", err) - } - _ = <-w.Events // consume Remove event - <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated - - w.mu.Lock() - defer w.mu.Unlock() - if len(w.watches) != 0 { - t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), 
w.watches) - } - if len(w.paths) != 0 { - t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go deleted file mode 100644 index 5564554f72..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fsnotify - -import ( - "os" - "path/filepath" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X. -// -// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument. -// -// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html -// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20 -func testExchangedataForWatcher(t *testing.T, watchDir bool) { - // Create directory to watch - testDir1 := tempMkdir(t) - - // For the intermediate file - testDir2 := tempMkdir(t) - - defer os.RemoveAll(testDir1) - defer os.RemoveAll(testDir2) - - resolvedFilename := "TestFsnotifyEvents.file" - - // TextMate does: - // - // 1. exchangedata (intermediate, resolved) - // 2. unlink intermediate - // - // Let's try to simulate that: - resolved := filepath.Join(testDir1, resolvedFilename) - intermediate := filepath.Join(testDir2, resolvedFilename+"~") - - // Make sure we create the file before we start watching - createAndSyncFile(t, resolved) - - watcher := newWatcher(t) - - // Test both variants in isolation - if watchDir { - addWatch(t, watcher, testDir1) - } else { - addWatch(t, watcher, resolved) - } - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var removeReceived counter - var createReceived counter - - done := make(chan bool) - - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(resolved) { - if event.Op&Remove == Remove { - removeReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - } - t.Logf("event received: %s", event) - } - done <- true - }() - - // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop. - for i := 1; i <= 3; i++ { - // The intermediate file is created in a folder outside the watcher - createAndSyncFile(t, intermediate) - - // 1. Swap - if err := unix.Exchangedata(intermediate, resolved, 0); err != nil { - t.Fatalf("[%d] exchangedata failed: %s", i, err) - } - - time.Sleep(50 * time.Millisecond) - - // 2. Delete the intermediate file - err := os.Remove(intermediate) - - if err != nil { - t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err) - } - - time.Sleep(50 * time.Millisecond) - - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - // The events will be (CHMOD + REMOVE + CREATE) X 2. 
Let's focus on the last two: - if removeReceived.value() < 3 { - t.Fatal("fsnotify remove events have not been received after 500 ms") - } - - if createReceived.value() < 3 { - t.Fatal("fsnotify create events have not been received after 500 ms") - } - - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -// TestExchangedataInWatchedDir test exchangedata operation on file in watched dir. -func TestExchangedataInWatchedDir(t *testing.T) { - testExchangedataForWatcher(t, true) -} - -// TestExchangedataInWatchedDir test exchangedata operation on watched file. -func TestExchangedataInWatchedFile(t *testing.T) { - testExchangedataForWatcher(t, false) -} - -func createAndSyncFile(t *testing.T, filepath string) { - f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating %s failed: %s", filepath, err) - } - f1.Sync() - f1.Close() -} diff --git a/vendor/github.com/fsnotify/fsnotify/integration_test.go b/vendor/github.com/fsnotify/fsnotify/integration_test.go deleted file mode 100644 index 8b7e9d3ec8..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/integration_test.go +++ /dev/null @@ -1,1237 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -package fsnotify - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - "time" -) - -// An atomic counter -type counter struct { - val int32 -} - -func (c *counter) increment() { - atomic.AddInt32(&c.val, 1) -} - -func (c *counter) value() int32 { - return atomic.LoadInt32(&c.val) -} - -func (c *counter) reset() { - atomic.StoreInt32(&c.val, 0) -} - -// tempMkdir makes a temporary directory -func tempMkdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "fsnotify") - if err != nil { - t.Fatalf("failed to create test directory: %s", err) - } - return dir -} - -// tempMkFile makes a temporary file. -func tempMkFile(t *testing.T, dir string) string { - f, err := ioutil.TempFile(dir, "fsnotify") - if err != nil { - t.Fatalf("failed to create test file: %v", err) - } - defer f.Close() - return f.Name() -} - -// newWatcher initializes an fsnotify Watcher instance. 
-func newWatcher(t *testing.T) *Watcher { - watcher, err := NewWatcher() - if err != nil { - t.Fatalf("NewWatcher() failed: %s", err) - } - return watcher -} - -// addWatch adds a watch for a directory -func addWatch(t *testing.T, watcher *Watcher, dir string) { - if err := watcher.Add(dir); err != nil { - t.Fatalf("watcher.Add(%q) failed: %s", dir, err) - } -} - -func TestFsnotifyMultipleOperations(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory that's not watched - testDirToMoveFiles := tempMkdir(t) - defer os.RemoveAll(testDirToMoveFiles) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived, renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Rename == Rename { - renameReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // Modify the file outside of the watched dir - f, err = os.Open(testFileRenamed) - if err != nil { - t.Fatalf("open test renamed file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file that was moved - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - rReceived := renameReceived.value() - if dReceived+rReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) - 
} - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyMultipleCreates(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived < 3 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3) - } - dReceived := deleteReceived.value() - if dReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling 
Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDirOnly(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - // This should NOT add any events to the fsnotify event queue - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - os.Remove(testFileAlreadyExists) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 1 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDeleteWatchedDir(t *testing.T) { - watcher := newWatcher(t) - defer watcher.Close() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f 
*os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Add a watch for testFile - addWatch(t, watcher, testFileAlreadyExists) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var deleteReceived counter - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - }() - - os.RemoveAll(testDir) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - dReceived := deleteReceived.value() - if dReceived < 2 { - t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) - } -} - -func TestFsnotifySubDir(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") - testSubDir := filepath.Join(testDir, "sub") - testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { - t.Logf("event received: %s", event) - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - addWatch(t, watcher, testDir) - - // Create sub-directory - if err := os.Mkdir(testSubDir, 0777); err != nil { - t.Fatalf("failed to create test sub-directory: %s", err) - } - - // Create a file - var f *os.File - f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - // Create a file (Should not see this! 
we are not watching subdir) - var fs *os.File - fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fs.Sync() - fs.Close() - - time.Sleep(200 * time.Millisecond) - - // Make sure receive deletes for both file and sub-directory - os.RemoveAll(testSubDir) - os.Remove(testFile1) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyRename(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Rename == Rename { - renameReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if renameReceived.value() == 0 { - t.Fatal("fsnotify rename events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToCreate(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - addWatch(t, watcher, 
testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Create == Create { - createReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if createReceived.value() == 0 { - t.Fatal("fsnotify create events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToOverwrite(t *testing.T) { - switch runtime.GOOS { - case "plan9", "windows": - t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Create a file - var fr *os.File - fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fr.Sync() - fr.Close() - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var eventReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testFileRenamed) { - eventReceived.increment() - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if 
err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if eventReceived.value() == 0 { - t.Fatal("fsnotify events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestRemovalOfWatch(t *testing.T) { - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - if err := watcher.Remove(testDir); err != nil { - t.Fatalf("Could not remove the watch: %v\n", err) - } - - go func() { - select { - case ev := <-watcher.Events: - t.Fatalf("We received event: %v\n", ev) - case <-time.After(500 * time.Millisecond): - t.Log("No event received, as expected.") - } - }() - - time.Sleep(200 * time.Millisecond) - // Modify the file outside of the watched dir - f, err := os.Open(testFileAlreadyExists) - if err != nil { - t.Fatalf("Open test file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - time.Sleep(400 * time.Millisecond) -} - -func TestFsnotifyAttrib(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("attributes don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - // The modifyReceived counter counts IsModify events that are not IsAttrib, - // and the attribReceived counts IsAttrib events (which are also IsModify as - // a consequence). 
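The deleted tests above and below all tally events through a small `counter` helper with `increment`/`value`/`reset` methods (the declarations continue just below); its definition sits elsewhere in the removed test file. For reference, a minimal sketch of such a helper, assuming `sync/atomic`; the upstream definition may differ in detail:

```go
// Hypothetical reconstruction of the deleted tests' counter helper:
// pointer receivers plus sync/atomic make increments from the watcher
// goroutine safe against concurrent reads from the test goroutine.
package fsnotify

import "sync/atomic"

type counter struct {
	val int32
}

func (c *counter) increment()   { atomic.AddInt32(&c.val, 1) }
func (c *counter) value() int32 { return atomic.LoadInt32(&c.val) }
func (c *counter) reset()       { atomic.StoreInt32(&c.val, 0) }
```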
- var modifyReceived counter - var attribReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Chmod == Chmod { - attribReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := os.Chmod(testFile, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here - time.Sleep(500 * time.Millisecond) - if modifyReceived.value() != 0 { - t.Fatal("received an unexpected modify event when creating a test file") - } - if attribReceived.value() == 0 { - t.Fatal("fsnotify attribute events have not received after 500 ms") - } - - // Modifying the contents of the file does not set the attrib flag (although eg. the mtime - // might have been modified). - modifyReceived.reset() - attribReceived.reset() - - f, err = os.OpenFile(testFile, os.O_WRONLY, 0) - if err != nil { - t.Fatalf("reopening test file failed: %s", err) - } - - f.WriteString("more data") - f.Sync() - f.Close() - - time.Sleep(500 * time.Millisecond) - - if modifyReceived.value() != 1 { - t.Fatal("didn't receive a modify event after changing test file contents") - } - - if attribReceived.value() != 0 { - t.Fatal("did receive an unexpected attrib event after changing test file contents") - } - - modifyReceived.reset() - attribReceived.reset() - - // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents - // of the file are not changed though) - if err := os.Chmod(testFile, 0600); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - time.Sleep(500 * time.Millisecond) - - if attribReceived.value() != 1 { - t.Fatal("didn't receive an attribute change after 500ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(1e9): - t.Fatal("event stream was not closed after 1 second") - } - - os.Remove(testFile) -} - -func TestFsnotifyClose(t *testing.T) { - watcher := newWatcher(t) - watcher.Close() - - var done int32 - go func() { - watcher.Close() - atomic.StoreInt32(&done, 1) - }() - - time.Sleep(50e6) // 50 ms - if atomic.LoadInt32(&done) == 0 { - t.Fatal("double Close() test failed: second Close() call didn't return") - } - - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - if err := watcher.Add(testDir); err == nil { - t.Fatal("expected error on Watch() after Close(), got nil") - } -} - -func TestFsnotifyFakeSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("symlinks don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - var 
errorsReceived counter - // Receive errors on the error channel on a separate goroutine - go func() { - for errors := range watcher.Errors { - t.Logf("Received error: %s", errors) - errorsReceived.increment() - } - }() - - // Count the CREATE events received - var createEventsReceived, otherEventsReceived counter - go func() { - for ev := range watcher.Events { - t.Logf("event received: %s", ev) - if ev.Op&Create == Create { - createEventsReceived.increment() - } else { - otherEventsReceived.increment() - } - } - }() - - addWatch(t, watcher, testDir) - - if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { - t.Fatalf("Failed to create bogus symlink: %s", err) - } - t.Logf("Created bogus symlink") - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - // Should not be error, just no events for broken links (watching nothing) - if errorsReceived.value() > 0 { - t.Fatal("fsnotify errors have been received.") - } - if otherEventsReceived.value() > 0 { - t.Fatal("fsnotify other events received on the broken link") - } - - // Except for 1 create event (for the link itself) - if createEventsReceived.value() == 0 { - t.Fatal("fsnotify create events were not received after 500 ms") - } - if createEventsReceived.value() > 1 { - t.Fatal("fsnotify more create events received than expected") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() -} - -func TestCyclicSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("symlinks don't work on Windows.") - } - - watcher := newWatcher(t) - - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - link := path.Join(testDir, "link") - if err := os.Symlink(".", link); err != nil { - t.Fatalf("could not make symlink: %v", err) - } - addWatch(t, watcher, testDir) - - var createEventsReceived counter - go func() { - for ev := range watcher.Events { - if ev.Op&Create == Create { - createEventsReceived.increment() - } - } - }() - - if err := os.Remove(link); err != nil { - t.Fatalf("Error removing link: %v", err) - } - - // It would be nice to be able to expect a delete event here, but kqueue has - // no way for us to get events on symlinks themselves, because opening them - // opens an fd to the file to which they point. - - if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil { - t.Fatalf("could not make symlink: %v", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - if got := createEventsReceived.value(); got == 0 { - t.Errorf("want at least 1 create event got %v", got) - } - - watcher.Close() -} - -// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. 
-// See https://codereview.appspot.com/103300045/ -// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race -func TestConcurrentRemovalOfWatch(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip("regression test for race only present on darwin") - } - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - - // Test that RemoveWatch can be invoked concurrently, with no data races. - removed1 := make(chan struct{}) - go func() { - defer close(removed1) - watcher.Remove(testDir) - }() - removed2 := make(chan struct{}) - go func() { - close(removed2) - watcher.Remove(testDir) - }() - <-removed1 - <-removed2 -} - -func TestClose(t *testing.T) { - // Regression test for #59 bad file descriptor from Close - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - watcher := newWatcher(t) - if err := watcher.Add(testDir); err != nil { - t.Fatalf("Expected no error on Add, got %v", err) - } - err := watcher.Close() - if err != nil { - t.Fatalf("Expected no error on Close, got %v.", err) - } -} - -// TestRemoveWithClose tests if one can handle Remove events and, at the same -// time, close Watcher object without any data races. -func TestRemoveWithClose(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - const fileN = 200 - tempFiles := make([]string, 0, fileN) - for i := 0; i < fileN; i++ { - tempFiles = append(tempFiles, tempMkFile(t, testDir)) - } - watcher := newWatcher(t) - if err := watcher.Add(testDir); err != nil { - t.Fatalf("Expected no error on Add, got %v", err) - } - startC, stopC := make(chan struct{}), make(chan struct{}) - errC := make(chan error) - go func() { - for { - select { - case <-watcher.Errors: - case <-watcher.Events: - case <-stopC: - return - } - } - }() - go func() { - <-startC - for _, fileName := range tempFiles { - os.Remove(fileName) - } - }() - go func() { - <-startC - errC <- watcher.Close() - }() - close(startC) - defer close(stopC) - if err := <-errC; err != nil { - t.Fatalf("Expected no error on Close, got %v.", err) - } -} - -func testRename(file1, file2 string) error { - switch runtime.GOOS { - case "windows", "plan9": - return os.Rename(file1, file2) - default: - cmd := exec.Command("mv", file1, file2) - return cmd.Run() - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go index c2b4acb18d..86e76a3d67 100644 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -22,7 +22,7 @@ import ( type Watcher struct { Events chan Event Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine + done chan struct{} // Channel for sending a "quit message" to the reader goroutine kq int // File descriptor (as returned by the kqueue() syscall). 
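The hunk above changes `done` from `chan bool` to `chan struct{}`; the hunks that follow build on it by replacing the single-receiver `w.done <- true` with `close(w.done)`. Closing a channel is a broadcast: every `select` that includes `<-w.done` unblocks at once, so the reader goroutine and any in-flight sends can all observe shutdown. A minimal, self-contained sketch of the idiom (not the vendored watcher code itself):

```go
// Broadcast-on-close shutdown: closing done wakes every select that
// waits on <-done, letting a producer abandon a blocked send.
package main

import "fmt"

func main() {
	events := make(chan string)
	done := make(chan struct{})
	stopped := make(chan struct{})

	go func() {
		defer close(stopped)
		for {
			select {
			case events <- "tick":
			case <-done: // fires immediately once done is closed
				fmt.Println("producer: quitting")
				return
			}
		}
	}()

	fmt.Println(<-events) // consume one event, then stop reading
	close(done)           // broadcast; no value is ever sent on done
	<-stopped             // wait for the producer to exit cleanly
}
```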
@@ -56,7 +56,7 @@ func NewWatcher() (*Watcher, error) { externalWatches: make(map[string]bool), Events: make(chan Event), Errors: make(chan error), - done: make(chan bool), + done: make(chan struct{}), } go w.readEvents() @@ -71,10 +71,8 @@ func (w *Watcher) Close() error { return nil } w.isClosed = true - w.mu.Unlock() // copy paths to remove while locked - w.mu.Lock() var pathsToRemove = make([]string, 0, len(w.watches)) for name := range w.watches { pathsToRemove = append(pathsToRemove, name) @@ -82,15 +80,12 @@ func (w *Watcher) Close() error { w.mu.Unlock() // unlock before calling Remove, which also locks - var err error for _, name := range pathsToRemove { - if e := w.Remove(name); e != nil && err == nil { - err = e - } + w.Remove(name) } - // Send "quit" message to the reader goroutine: - w.done <- true + // send a "quit" message to the reader goroutine + close(w.done) return nil } @@ -266,17 +261,12 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { func (w *Watcher) readEvents() { eventBuffer := make([]unix.Kevent_t, 10) +loop: for { // See if there is a message on the "done" channel select { case <-w.done: - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - close(w.Events) - close(w.Errors) - return + break loop default: } @@ -284,7 +274,11 @@ func (w *Watcher) readEvents() { kevents, err := read(w.kq, eventBuffer, &keventWaitTime) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { - w.Errors <- err + select { + case w.Errors <- err: + case <-w.done: + break loop + } continue } @@ -319,8 +313,12 @@ func (w *Watcher) readEvents() { if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { w.sendDirectoryChangeEvents(event.Name) } else { - // Send the event on the Events channel - w.Events <- event + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } } if event.Op&Remove == Remove { @@ -352,6 +350,18 @@ func (w *Watcher) readEvents() { kevents = kevents[1:] } } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) } // newEvent returns an platform-independent Event based on kqueue Fflags. 
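In the `Close`/`readEvents` hunks above, every bare `w.Errors <- err` and `w.Events <- event` becomes a `select` guarded by `<-w.done`, and the post-loop cleanup sends with a `default:` case so it can never block on a departed reader; the `sendDirectoryChangeEvents` hunk below applies the same guard. The two send shapes, isolated as a sketch (helper names are ours, not the vendored API):

```go
// The two guarded-send shapes used by the readEvents rewrite.
package watchsketch

// sendOrQuit blocks until the consumer reads err or shutdown begins.
func sendOrQuit(errs chan<- error, done <-chan struct{}, err error) bool {
	select {
	case errs <- err:
		return true
	case <-done:
		return false // Close() was called; abandon the send
	}
}

// sendOrDrop never blocks: once the loop has exited, done is already
// closed and the reader may be gone, so a failed send is dropped.
func sendOrDrop(errs chan<- error, err error) bool {
	select {
	case errs <- err:
		return true
	default:
		return false
	}
}
```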
@@ -407,7 +417,11 @@ func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { // Get all files files, err := ioutil.ReadDir(dirPath) if err != nil { - w.Errors <- err + select { + case w.Errors <- err: + case <-w.done: + return + } } // Search for new files @@ -428,7 +442,11 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf w.mu.Unlock() if !doesExist { // Send create event - w.Events <- newCreateEvent(filePath) + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } } // like watchDirectoryFiles (but without doing another ReadDir) diff --git a/vendor/github.com/fullsailor/pkcs7/.travis.yml b/vendor/github.com/fullsailor/pkcs7/.travis.yml index c7154e46c3..bc12043763 100644 --- a/vendor/github.com/fullsailor/pkcs7/.travis.yml +++ b/vendor/github.com/fullsailor/pkcs7/.travis.yml @@ -1,6 +1,7 @@ language: go go: - - 1.6 - - 1.7 + - 1.8 + - 1.9 + - "1.10" - tip diff --git a/vendor/github.com/fullsailor/pkcs7/ber.go b/vendor/github.com/fullsailor/pkcs7/ber.go index bf3e804296..89e96d30c7 100644 --- a/vendor/github.com/fullsailor/pkcs7/ber.go +++ b/vendor/github.com/fullsailor/pkcs7/ber.go @@ -5,7 +5,7 @@ import ( "errors" ) -var encodeIndent = 0 +// var encodeIndent = 0 type asn1Object interface { EncodeTo(writer *bytes.Buffer) error @@ -18,7 +18,7 @@ type asn1Structured struct { func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) - encodeIndent++ + //encodeIndent++ inner := new(bytes.Buffer) for _, obj := range s.content { err := obj.EncodeTo(inner) @@ -26,7 +26,7 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { return err } } - encodeIndent-- + //encodeIndent-- out.Write(s.tagBytes) encodeLength(out, inner.Len()) out.Write(inner.Bytes()) diff --git a/vendor/github.com/fullsailor/pkcs7/ber_test.go b/vendor/github.com/fullsailor/pkcs7/ber_test.go deleted file mode 100644 index 19a0f514c2..0000000000 --- a/vendor/github.com/fullsailor/pkcs7/ber_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package pkcs7 - -import ( - "bytes" - "encoding/asn1" - "strings" - "testing" -) - -func TestBer2Der(t *testing.T) { - // indefinite length fixture - ber := []byte{0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00} - expected := []byte{0x30, 0x03, 0x02, 0x01, 0x01} - der, err := ber2der(ber) - if err != nil { - t.Fatalf("ber2der failed with error: %v", err) - } - if bytes.Compare(der, expected) != 0 { - t.Errorf("ber2der result did not match.\n\tExpected: % X\n\tActual: % X", expected, der) - } - - if der2, err := ber2der(der); err != nil { - t.Errorf("ber2der on DER bytes failed with error: %v", err) - } else { - if !bytes.Equal(der, der2) { - t.Error("ber2der is not idempotent") - } - } - var thing struct { - Number int - } - rest, err := asn1.Unmarshal(der, &thing) - if err != nil { - t.Errorf("Cannot parse resulting DER because: %v", err) - } else if len(rest) > 0 { - t.Errorf("Resulting DER has trailing data: % X", rest) - } -} - -func TestBer2Der_Negatives(t *testing.T) { - fixtures := []struct { - Input []byte - ErrorContains string - }{ - {[]byte{0x30, 0x85}, "length too long"}, - {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, "length is negative"}, - {[]byte{0x30, 0x82, 0x0, 0x1}, "length has leading zero"}, - {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, "Invalid BER format"}, - {[]byte{0x30, 0x03, 0x01, 0x02}, "length is more than available data"}, - } - - for _, 
fixture := range fixtures { - _, err := ber2der(fixture.Input) - if err == nil { - t.Errorf("No error thrown. Expected: %s", fixture.ErrorContains) - } - if !strings.Contains(err.Error(), fixture.ErrorContains) { - t.Errorf("Unexpected error thrown.\n\tExpected: /%s/\n\tActual: %s", fixture.ErrorContains, err.Error()) - } - } -} - -func TestBer2Der_NestedMultipleIndefinite(t *testing.T) { - // indefinite length fixture - ber := []byte{0x30, 0x80, 0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00, 0x30, 0x80, 0x02, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00} - expected := []byte{0x30, 0x0A, 0x30, 0x03, 0x02, 0x01, 0x01, 0x30, 0x03, 0x02, 0x01, 0x02} - - der, err := ber2der(ber) - if err != nil { - t.Fatalf("ber2der failed with error: %v", err) - } - if bytes.Compare(der, expected) != 0 { - t.Errorf("ber2der result did not match.\n\tExpected: % X\n\tActual: % X", expected, der) - } - - if der2, err := ber2der(der); err != nil { - t.Errorf("ber2der on DER bytes failed with error: %v", err) - } else { - if !bytes.Equal(der, der2) { - t.Error("ber2der is not idempotent") - } - } - var thing struct { - Nest1 struct { - Number int - } - Nest2 struct { - Number int - } - } - rest, err := asn1.Unmarshal(der, &thing) - if err != nil { - t.Errorf("Cannot parse resulting DER because: %v", err) - } else if len(rest) > 0 { - t.Errorf("Resulting DER has trailing data: % X", rest) - } -} diff --git a/vendor/github.com/fullsailor/pkcs7/pkcs7.go b/vendor/github.com/fullsailor/pkcs7/pkcs7.go index 8d5af853ad..0264466b46 100644 --- a/vendor/github.com/fullsailor/pkcs7/pkcs7.go +++ b/vendor/github.com/fullsailor/pkcs7/pkcs7.go @@ -84,7 +84,7 @@ type recipientInfo struct { type encryptedContentInfo struct { ContentType asn1.ObjectIdentifier ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent asn1.RawValue `asn1:"tag:0,optional,explicit"` + EncryptedContent asn1.RawValue `asn1:"tag:0,optional"` } type attribute struct { @@ -222,6 +222,10 @@ func (p7 *PKCS7) Verify() (err error) { func verifySignature(p7 *PKCS7, signer signerInfo) error { signedData := p7.Content + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } if len(signer.AuthenticatedAttributes) > 0 { // TODO(fullsailor): First check the content type match var digest []byte @@ -229,10 +233,6 @@ func verifySignature(p7 *PKCS7, signer signerInfo) error { if err != nil { return err } - hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) - if err != nil { - return err - } h := hash.New() h.Write(p7.Content) computed := h.Sum(nil) @@ -254,7 +254,18 @@ func verifySignature(p7 *PKCS7, signer signerInfo) error { return errors.New("pkcs7: No certificate for signer") } - algo := x509.SHA1WithRSA + algo := getSignatureAlgorithmFromAI(signer.DigestEncryptionAlgorithm) + if algo == x509.UnknownSignatureAlgorithm { + // I'm not sure what the spec here is, and the openssl sources were not + // helpful. But, this is what App Store receipts appear to do. + // The DigestEncryptionAlgorithm is just "rsaEncryption (PKCS #1)" + // But we're expecting a digest + encryption algorithm. So... we're going + // to determine an algorithm based on the DigestAlgorithm and this + // encryption algorithm. 
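The comment above (its code continues in the hunk just below) works around a real-world quirk: some producers, App Store receipts included, put the bare key OID `rsaEncryption` in `digestEncryptionAlgorithm` instead of a combined OID such as `sha256WithRSAEncryption`, so the verifier has to synthesize the x509 algorithm from the digest OID plus RSA. A toy illustration of that fallback (the map is illustrative, not the package's full table):

```go
// Digest+RSA fallback: when the signature OID says only
// "rsaEncryption", derive the x509 algorithm from the digest hash.
package main

import (
	"crypto"
	"crypto/x509"
	"fmt"
)

var rsaAlgoForHash = map[crypto.Hash]x509.SignatureAlgorithm{
	crypto.SHA1:   x509.SHA1WithRSA,
	crypto.SHA256: x509.SHA256WithRSA,
}

func main() {
	hash := crypto.SHA256 // resolved from the signer's digestAlgorithm OID
	algo, ok := rsaAlgoForHash[hash]
	if !ok {
		algo = x509.UnknownSignatureAlgorithm
	}
	fmt.Println(algo) // prints: SHA256-RSA
}
```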
+ if signer.DigestEncryptionAlgorithm.Algorithm.Equal(oidEncryptionAlgorithmRSA) { + algo = getRSASignatureAlgorithmForDigestAlgorithm(hash) + } + } return cert.CheckSignature(algo, signedData, signer.EncryptedDigest) } @@ -290,10 +301,21 @@ func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { switch { case oid.Equal(oidDigestAlgorithmSHA1): return crypto.SHA1, nil + case oid.Equal(oidSHA256): + return crypto.SHA256, nil } return crypto.Hash(0), ErrUnsupportedAlgorithm } +func getRSASignatureAlgorithmForDigestAlgorithm(hash crypto.Hash) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if details.pubKeyAlgo == x509.RSA && details.hash == hash { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + // GetOnlySigner returns an x509.Certificate for the first signer of the signed // data payload. If there are more or less than one signer, nil is returned func (p7 *PKCS7) GetOnlySigner() *x509.Certificate { @@ -633,7 +655,7 @@ func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, signer := signerInfo{ AuthenticatedAttributes: finalAttrs, DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1}, - DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidEncryptionAlgorithmRSA}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA1WithRSA}, IssuerAndSerialNumber: ias, EncryptedDigest: signature, Version: 1, @@ -652,7 +674,7 @@ func (sd *SignedData) AddCertificate(cert *x509.Certificate) { // Detach removes content from the signed data struct to make it a detached signature. // This must be called right before Finish() func (sd *SignedData) Detach() { - sd.sd.ContentInfo = contentInfo{ContentType: oidSignedData} + sd.sd.ContentInfo = contentInfo{ContentType: oidData} } // Finish marshals the content and its signers diff --git a/vendor/github.com/fullsailor/pkcs7/pkcs7_test.go b/vendor/github.com/fullsailor/pkcs7/pkcs7_test.go deleted file mode 100644 index cbe2c5cad1..0000000000 --- a/vendor/github.com/fullsailor/pkcs7/pkcs7_test.go +++ /dev/null @@ -1,678 +0,0 @@ -package pkcs7 - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "math/big" - "os" - "os/exec" - "testing" - "time" -) - -func TestVerify(t *testing.T) { - fixture := UnmarshalTestFixture(SignedTestFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } - expected := []byte("We the People") - if bytes.Compare(p7.Content, expected) != 0 { - t.Errorf("Signed content does not match.\n\tExpected:%s\n\tActual:%s", expected, p7.Content) - - } -} - -func TestVerifyEC2(t *testing.T) { - fixture := UnmarshalTestFixture(EC2IdentityDocumentFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - p7.Certificates = []*x509.Certificate{fixture.Certificate} - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } -} - -func TestVerifyAppStore(t *testing.T) { - fixture := UnmarshalTestFixture(AppStoreRecieptFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", 
err) - } -} - -func TestDecrypt(t *testing.T) { - fixture := UnmarshalTestFixture(EncryptedTestFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Fatal(err) - } - content, err := p7.Decrypt(fixture.Certificate, fixture.PrivateKey) - if err != nil { - t.Errorf("Cannot Decrypt with error: %v", err) - } - expected := []byte("This is a test") - if bytes.Compare(content, expected) != 0 { - t.Errorf("Decrypted result does not match.\n\tExpected:%s\n\tActual:%s", expected, content) - } -} - -func TestDegenerateCertificate(t *testing.T) { - cert, err := createTestCertificate() - if err != nil { - t.Fatal(err) - } - deg, err := DegenerateCertificate(cert.Certificate.Raw) - if err != nil { - t.Fatal(err) - } - testOpenSSLParse(t, deg) - pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: deg}) -} - -// writes the cert to a temporary file and tests that openssl can read it. -func testOpenSSLParse(t *testing.T, certBytes []byte) { - tmpCertFile, err := ioutil.TempFile("", "testCertificate") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpCertFile.Name()) // clean up - - if _, err := tmpCertFile.Write(certBytes); err != nil { - t.Fatal(err) - } - - opensslCMD := exec.Command("openssl", "pkcs7", "-inform", "der", "-in", tmpCertFile.Name()) - _, err = opensslCMD.Output() - if err != nil { - t.Fatal(err) - } - - if err := tmpCertFile.Close(); err != nil { - t.Fatal(err) - } - -} - -func TestSign(t *testing.T) { - cert, err := createTestCertificate() - if err != nil { - t.Fatal(err) - } - content := []byte("Hello World") - for _, testDetach := range []bool{false, true} { - toBeSigned, err := NewSignedData(content) - if err != nil { - t.Fatalf("Cannot initialize signed data: %s", err) - } - if err := toBeSigned.AddSigner(cert.Certificate, cert.PrivateKey, SignerInfoConfig{}); err != nil { - t.Fatalf("Cannot add signer: %s", err) - } - if testDetach { - t.Log("Testing detached signature") - toBeSigned.Detach() - } else { - t.Log("Testing attached signature") - } - signed, err := toBeSigned.Finish() - if err != nil { - t.Fatalf("Cannot finish signing data: %s", err) - } - pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) - p7, err := Parse(signed) - if err != nil { - t.Fatalf("Cannot parse our signed data: %s", err) - } - if testDetach { - p7.Content = content - } - if bytes.Compare(content, p7.Content) != 0 { - t.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) - } - if err := p7.Verify(); err != nil { - t.Errorf("Cannot verify our signed data: %s", err) - } - } -} - -func ExampleSignedData() { - // generate a signing cert or load a key pair - cert, err := createTestCertificate() - if err != nil { - fmt.Printf("Cannot create test certificates: %s", err) - } - - // Initialize a SignedData struct with content to be signed - signedData, err := NewSignedData([]byte("Example data to be signed")) - if err != nil { - fmt.Printf("Cannot initialize signed data: %s", err) - } - - // Add the signing cert and private key - if err := signedData.AddSigner(cert.Certificate, cert.PrivateKey, SignerInfoConfig{}); err != nil { - fmt.Printf("Cannot add signer: %s", err) - } - - // Call Detach() is you want to remove content from the signature - // and generate an S/MIME detached signature - signedData.Detach() - - // Finish() to obtain the signature bytes - detachedSignature, err := signedData.Finish() - if err != nil { - fmt.Printf("Cannot finish signing data: %s", err) - } - pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: 
detachedSignature}) -} - -func TestOpenSSLVerifyDetachedSignature(t *testing.T) { - rootCert, err := createTestCertificateByIssuer("PKCS7 Test Root CA", nil) - if err != nil { - t.Fatalf("Cannot generate root cert: %s", err) - } - signerCert, err := createTestCertificateByIssuer("PKCS7 Test Signer Cert", rootCert) - if err != nil { - t.Fatalf("Cannot generate signer cert: %s", err) - } - content := []byte("Hello World") - toBeSigned, err := NewSignedData(content) - if err != nil { - t.Fatalf("Cannot initialize signed data: %s", err) - } - if err := toBeSigned.AddSigner(signerCert.Certificate, signerCert.PrivateKey, SignerInfoConfig{}); err != nil { - t.Fatalf("Cannot add signer: %s", err) - } - toBeSigned.Detach() - signed, err := toBeSigned.Finish() - if err != nil { - t.Fatalf("Cannot finish signing data: %s", err) - } - - // write the root cert to a temp file - tmpRootCertFile, err := ioutil.TempFile("", "pkcs7TestRootCA") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpRootCertFile.Name()) // clean up - fd, err := os.OpenFile(tmpRootCertFile.Name(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755) - if err != nil { - t.Fatal(err) - } - pem.Encode(fd, &pem.Block{Type: "CERTIFICATE", Bytes: rootCert.Certificate.Raw}) - fd.Close() - - // write the signature to a temp file - tmpSignatureFile, err := ioutil.TempFile("", "pkcs7Signature") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpSignatureFile.Name()) // clean up - ioutil.WriteFile(tmpSignatureFile.Name(), signed, 0755) - - // write the content to a temp file - tmpContentFile, err := ioutil.TempFile("", "pkcs7Content") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpContentFile.Name()) // clean up - ioutil.WriteFile(tmpContentFile.Name(), content, 0755) - - // call openssl to verify the signature on the content using the root - opensslCMD := exec.Command("openssl", "smime", "-verify", - "-in", tmpSignatureFile.Name(), "-inform", "DER", - "-content", tmpContentFile.Name(), - "-CAfile", tmpRootCertFile.Name()) - out, err := opensslCMD.Output() - t.Logf("%s", out) - if err != nil { - t.Fatalf("openssl command failed with %s", err) - } -} - -func TestEncrypt(t *testing.T) { - modes := []int{ - EncryptionAlgorithmDESCBC, - EncryptionAlgorithmAES128GCM, - } - - for _, mode := range modes { - ContentEncryptionAlgorithm = mode - - plaintext := []byte("Hello Secret World!") - cert, err := createTestCertificate() - if err != nil { - t.Fatal(err) - } - encrypted, err := Encrypt(plaintext, []*x509.Certificate{cert.Certificate}) - if err != nil { - t.Fatal(err) - } - p7, err := Parse(encrypted) - if err != nil { - t.Fatalf("cannot Parse encrypted result: %s", err) - } - result, err := p7.Decrypt(cert.Certificate, cert.PrivateKey) - if err != nil { - t.Fatalf("cannot Decrypt encrypted result: %s", err) - } - if bytes.Compare(plaintext, result) != 0 { - t.Errorf("encrypted data does not match plaintext:\n\tExpected: %s\n\tActual: %s", plaintext, result) - } - } -} - -func TestUnmarshalSignedAttribute(t *testing.T) { - cert, err := createTestCertificate() - if err != nil { - t.Fatal(err) - } - content := []byte("Hello World") - toBeSigned, err := NewSignedData(content) - if err != nil { - t.Fatalf("Cannot initialize signed data: %s", err) - } - oidTest := asn1.ObjectIdentifier{2, 3, 4, 5, 6, 7} - testValue := "TestValue" - if err := toBeSigned.AddSigner(cert.Certificate, cert.PrivateKey, SignerInfoConfig{ - ExtraSignedAttributes: []Attribute{Attribute{Type: oidTest, Value: testValue}}, - }); err != nil { - t.Fatalf("Cannot add 
signer: %s", err) - } - signed, err := toBeSigned.Finish() - if err != nil { - t.Fatalf("Cannot finish signing data: %s", err) - } - p7, err := Parse(signed) - var actual string - err = p7.UnmarshalSignedAttribute(oidTest, &actual) - if err != nil { - t.Fatalf("Cannot unmarshal test value: %s", err) - } - if testValue != actual { - t.Errorf("Attribute does not match test value\n\tExpected: %s\n\tActual: %s", testValue, actual) - } -} - -func TestPad(t *testing.T) { - tests := []struct { - Original []byte - Expected []byte - BlockSize int - }{ - {[]byte{0x1, 0x2, 0x3, 0x10}, []byte{0x1, 0x2, 0x3, 0x10, 0x4, 0x4, 0x4, 0x4}, 8}, - {[]byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0}, []byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8}, 8}, - } - for _, test := range tests { - padded, err := pad(test.Original, test.BlockSize) - if err != nil { - t.Errorf("pad encountered error: %s", err) - continue - } - if bytes.Compare(test.Expected, padded) != 0 { - t.Errorf("pad results mismatch:\n\tExpected: %X\n\tActual: %X", test.Expected, padded) - } - } -} - -type certKeyPair struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -func createTestCertificate() (certKeyPair, error) { - signer, err := createTestCertificateByIssuer("Eddard Stark", nil) - if err != nil { - return certKeyPair{}, err - } - fmt.Println("Created root cert") - pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: signer.Certificate.Raw}) - pair, err := createTestCertificateByIssuer("Jon Snow", signer) - if err != nil { - return certKeyPair{}, err - } - fmt.Println("Created signer cert") - pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: pair.Certificate.Raw}) - return *pair, nil -} - -func createTestCertificateByIssuer(name string, issuer *certKeyPair) (*certKeyPair, error) { - priv, err := rsa.GenerateKey(rand.Reader, 1024) - if err != nil { - return nil, err - } - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 32) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - SignatureAlgorithm: x509.SHA256WithRSA, - Subject: pkix.Name{ - CommonName: name, - Organization: []string{"Acme Co"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().AddDate(1, 0, 0), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageEmailProtection}, - } - var issuerCert *x509.Certificate - var issuerKey crypto.PrivateKey - if issuer != nil { - issuerCert = issuer.Certificate - issuerKey = issuer.PrivateKey - } else { - template.IsCA = true - template.KeyUsage |= x509.KeyUsageCertSign - issuerCert = &template - issuerKey = priv - } - cert, err := x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.Public(), issuerKey) - if err != nil { - return nil, err - } - leaf, err := x509.ParseCertificate(cert) - if err != nil { - return nil, err - } - return &certKeyPair{ - Certificate: leaf, - PrivateKey: priv, - }, nil -} - -type TestFixture struct { - Input []byte - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -func UnmarshalTestFixture(testPEMBlock string) TestFixture { - var result TestFixture - var derBlock *pem.Block - var pemBlock = []byte(testPEMBlock) - for { - derBlock, pemBlock = pem.Decode(pemBlock) - if derBlock == nil { - break - } - switch derBlock.Type { - case "PKCS7": - result.Input = derBlock.Bytes - case "CERTIFICATE": - result.Certificate, _ = 
x509.ParseCertificate(derBlock.Bytes) - case "PRIVATE KEY": - result.PrivateKey, _ = x509.ParsePKCS1PrivateKey(derBlock.Bytes) - } - } - - return result -} - -func MarshalTestFixture(t TestFixture, w io.Writer) { - if t.Input != nil { - pem.Encode(w, &pem.Block{Type: "PKCS7", Bytes: t.Input}) - } - if t.Certificate != nil { - pem.Encode(w, &pem.Block{Type: "CERTIFICATE", Bytes: t.Certificate.Raw}) - } - if t.PrivateKey != nil { - pem.Encode(w, &pem.Block{Type: "PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(t.PrivateKey)}) - } -} - -var SignedTestFixture = ` ------BEGIN PKCS7----- -MIIDVgYJKoZIhvcNAQcCoIIDRzCCA0MCAQExCTAHBgUrDgMCGjAcBgkqhkiG9w0B -BwGgDwQNV2UgdGhlIFBlb3BsZaCCAdkwggHVMIIBQKADAgECAgRpuDctMAsGCSqG -SIb3DQEBCzApMRAwDgYDVQQKEwdBY21lIENvMRUwEwYDVQQDEwxFZGRhcmQgU3Rh -cmswHhcNMTUwNTA2MDQyNDQ4WhcNMTYwNTA2MDQyNDQ4WjAlMRAwDgYDVQQKEwdB -Y21lIENvMREwDwYDVQQDEwhKb24gU25vdzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw -gYkCgYEAqr+tTF4mZP5rMwlXp1y+crRtFpuLXF1zvBZiYMfIvAHwo1ta8E1IcyEP -J1jIiKMcwbzeo6kAmZzIJRCTezq9jwXUsKbQTvcfOH9HmjUmXBRWFXZYoQs/OaaF -a45deHmwEeMQkuSWEtYiVKKZXtJOtflKIT3MryJEDiiItMkdybUCAwEAAaMSMBAw -DgYDVR0PAQH/BAQDAgCgMAsGCSqGSIb3DQEBCwOBgQDK1EweZWRL+f7Z+J0kVzY8 -zXptcBaV4Lf5wGZJLJVUgp33bpLNpT3yadS++XQJ+cvtW3wADQzBSTMduyOF8Zf+ -L7TjjrQ2+F2HbNbKUhBQKudxTfv9dJHdKbD+ngCCdQJYkIy2YexsoNG0C8nQkggy -axZd/J69xDVx6pui3Sj8sDGCATYwggEyAgEBMDEwKTEQMA4GA1UEChMHQWNtZSBD -bzEVMBMGA1UEAxMMRWRkYXJkIFN0YXJrAgRpuDctMAcGBSsOAwIaoGEwGAYJKoZI -hvcNAQkDMQsGCSqGSIb3DQEHATAgBgkqhkiG9w0BCQUxExcRMTUwNTA2MDAyNDQ4 -LTA0MDAwIwYJKoZIhvcNAQkEMRYEFG9D7gcTh9zfKiYNJ1lgB0yTh4sZMAsGCSqG -SIb3DQEBAQSBgFF3sGDU9PtXty/QMtpcFa35vvIOqmWQAIZt93XAskQOnBq4OloX -iL9Ct7t1m4pzjRm0o9nDkbaSLZe7HKASHdCqijroScGlI8M+alJ8drHSFv6ZIjnM -FIwIf0B2Lko6nh9/6mUXq7tbbIHa3Gd1JUVire/QFFtmgRXMbXYk8SIS ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIB1TCCAUCgAwIBAgIEabg3LTALBgkqhkiG9w0BAQswKTEQMA4GA1UEChMHQWNt -ZSBDbzEVMBMGA1UEAxMMRWRkYXJkIFN0YXJrMB4XDTE1MDUwNjA0MjQ0OFoXDTE2 -MDUwNjA0MjQ0OFowJTEQMA4GA1UEChMHQWNtZSBDbzERMA8GA1UEAxMISm9uIFNu -b3cwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAKq/rUxeJmT+azMJV6dcvnK0 -bRabi1xdc7wWYmDHyLwB8KNbWvBNSHMhDydYyIijHMG83qOpAJmcyCUQk3s6vY8F -1LCm0E73Hzh/R5o1JlwUVhV2WKELPzmmhWuOXXh5sBHjEJLklhLWIlSimV7STrX5 -SiE9zK8iRA4oiLTJHcm1AgMBAAGjEjAQMA4GA1UdDwEB/wQEAwIAoDALBgkqhkiG -9w0BAQsDgYEAytRMHmVkS/n+2fidJFc2PM16bXAWleC3+cBmSSyVVIKd926SzaU9 -8mnUvvl0CfnL7Vt8AA0MwUkzHbsjhfGX/i+04460Nvhdh2zWylIQUCrncU37/XSR -3Smw/p4AgnUCWJCMtmHsbKDRtAvJ0JIIMmsWXfyevcQ1ceqbot0o/LA= ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIICXgIBAAKBgQCqv61MXiZk/mszCVenXL5ytG0Wm4tcXXO8FmJgx8i8AfCjW1rw -TUhzIQ8nWMiIoxzBvN6jqQCZnMglEJN7Or2PBdSwptBO9x84f0eaNSZcFFYVdlih -Cz85poVrjl14ebAR4xCS5JYS1iJUople0k61+UohPcyvIkQOKIi0yR3JtQIDAQAB -AoGBAIPLCR9N+IKxodq11lNXEaUFwMHXc1zqwP8no+2hpz3+nVfplqqubEJ4/PJY -5AgbJoIfnxVhyBXJXu7E+aD/OPneKZrgp58YvHKgGvvPyJg2gpC/1Fh0vQB0HNpI -1ZzIZUl8ZTUtVgtnCBUOh5JGI4bFokAqrT//Uvcfd+idgxqBAkEA1ZbP/Kseld14 -qbWmgmU5GCVxsZRxgR1j4lG3UVjH36KXMtRTm1atAam1uw3OEGa6Y3ANjpU52FaB -Hep5rkk4FQJBAMynMo1L1uiN5GP+KYLEF5kKRxK+FLjXR0ywnMh+gpGcZDcOae+J -+t1gLoWBIESH/Xt639T7smuSfrZSA9V0EyECQA8cvZiWDvLxmaEAXkipmtGPjKzQ -4PsOtkuEFqFl07aKDYKmLUg3aMROWrJidqsIabWxbvQgsNgSvs38EiH3wkUCQQCg -ndxb7piVXb9RBwm3OoU2tE1BlXMX+sVXmAkEhd2dwDsaxrI3sHf1xGXem5AimQRF -JBOFyaCnMotGNioSHY5hAkEAxyXcNixQ2RpLXJTQZtwnbk0XDcbgB+fBgXnv/4f3 -BCvcu85DqJeJyQv44Oe1qsXEX9BfcQIOVaoep35RPlKi9g== ------END PRIVATE KEY-----` - -// Content is "This is a test" -var EncryptedTestFixture = ` ------BEGIN PKCS7----- -MIIBFwYJKoZIhvcNAQcDoIIBCDCCAQQCAQAxgcowgccCAQAwMjApMRAwDgYDVQQK 
-EwdBY21lIENvMRUwEwYDVQQDEwxFZGRhcmQgU3RhcmsCBQDL+CvWMAsGCSqGSIb3 -DQEBAQSBgKyP/5WlRTZD3dWMrLOX6QRNDrXEkQjhmToRwFZdY3LgUh25ZU0S/q4G -dHPV21Fv9lQD+q7l3vfeHw8M6Z1PKi9sHMVfxAkQpvaI96DTIT3YHtuLC1w3geCO -8eFWTq2qS4WChSuS/yhYosjA1kTkE0eLnVZcGw0z/WVuEZznkdyIMDIGCSqGSIb3 -DQEHATARBgUrDgMCBwQImpKsUyMPpQigEgQQRcWWrCRXqpD5Njs0GkJl+g== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIB1jCCAUGgAwIBAgIFAMv4K9YwCwYJKoZIhvcNAQELMCkxEDAOBgNVBAoTB0Fj -bWUgQ28xFTATBgNVBAMTDEVkZGFyZCBTdGFyazAeFw0xNTA1MDYwMzU2NDBaFw0x -NjA1MDYwMzU2NDBaMCUxEDAOBgNVBAoTB0FjbWUgQ28xETAPBgNVBAMTCEpvbiBT -bm93MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDK6NU0R0eiCYVquU4RcjKc -LzGfx0aa1lMr2TnLQUSeLFZHFxsyyMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg -8+Zg2r8xnnney7abxcuv0uATWSIeKlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP -+Zxp2ni5qHNraf3wE2VPIQIDAQABoxIwEDAOBgNVHQ8BAf8EBAMCAKAwCwYJKoZI -hvcNAQELA4GBAIr2F7wsqmEU/J/kLyrCgEVXgaV/sKZq4pPNnzS0tBYk8fkV3V18 -sBJyHKRLL/wFZASvzDcVGCplXyMdAOCyfd8jO3F9Ac/xdlz10RrHJT75hNu3a7/n -9KNwKhfN4A1CQv2x372oGjRhCW5bHNCWx4PIVeNzCyq/KZhyY9sxHE6f ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIICXgIBAAKBgQDK6NU0R0eiCYVquU4RcjKcLzGfx0aa1lMr2TnLQUSeLFZHFxsy -yMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg8+Zg2r8xnnney7abxcuv0uATWSIe -KlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP+Zxp2ni5qHNraf3wE2VPIQIDAQAB -AoGBALyvnSt7KUquDen7nXQtvJBudnf9KFPt//OjkdHHxNZNpoF/JCSqfQeoYkeu -MdAVYNLQGMiRifzZz4dDhA9xfUAuy7lcGQcMCxEQ1dwwuFaYkawbS0Tvy2PFlq2d -H5/HeDXU4EDJ3BZg0eYj2Bnkt1sJI35UKQSxblQ0MY2q0uFBAkEA5MMOogkgUx1C -67S1tFqMUSM8D0mZB0O5vOJZC5Gtt2Urju6vywge2ArExWRXlM2qGl8afFy2SgSv -Xk5eybcEiQJBAOMRwwbEoW5NYHuFFbSJyWll4n71CYuWuQOCzehDPyTb80WFZGLV -i91kFIjeERyq88eDE5xVB3ZuRiXqaShO/9kCQQCKOEkpInaDgZSjskZvuJ47kByD -6CYsO4GIXQMMeHML8ncFH7bb6AYq5ybJVb2NTU7QLFJmfeYuhvIm+xdOreRxAkEA -o5FC5Jg2FUfFzZSDmyZ6IONUsdF/i78KDV5nRv1R+hI6/oRlWNCtTNBv/lvBBd6b -dseUE9QoaQZsn5lpILEvmQJAZ0B+Or1rAYjnbjnUhdVZoy9kC4Zov+4UH3N/BtSy -KJRWUR0wTWfZBPZ5hAYZjTBEAFULaYCXlQKsODSp0M1aQA== ------END PRIVATE KEY-----` - -var EC2IdentityDocumentFixture = ` ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA -JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh -eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 -cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh -bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs -bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg -OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK -ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj -aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy -YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA -AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n -dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi -IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B -CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG -CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w -LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA -AAAAAA== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw -FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD -VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z -ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u -IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl -cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e 
-ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 -VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P -hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j -k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U -hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF -lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf -MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW -MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw -vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw -7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K ------END CERTIFICATE-----` - -var AppStoreRecieptFixture = ` ------BEGIN PKCS7----- -MIITtgYJKoZIhvcNAQcCoIITpzCCE6MCAQExCzAJBgUrDgMCGgUAMIIDVwYJKoZI -hvcNAQcBoIIDSASCA0QxggNAMAoCAQgCAQEEAhYAMAoCARQCAQEEAgwAMAsCAQEC -AQEEAwIBADALAgEDAgEBBAMMATEwCwIBCwIBAQQDAgEAMAsCAQ8CAQEEAwIBADAL -AgEQAgEBBAMCAQAwCwIBGQIBAQQDAgEDMAwCAQoCAQEEBBYCNCswDAIBDgIBAQQE -AgIAjTANAgENAgEBBAUCAwFgvTANAgETAgEBBAUMAzEuMDAOAgEJAgEBBAYCBFAy -NDcwGAIBAgIBAQQQDA5jb20uemhpaHUudGVzdDAYAgEEAgECBBCS+ZODNMHwT1Nz -gWYDXyWZMBsCAQACAQEEEwwRUHJvZHVjdGlvblNhbmRib3gwHAIBBQIBAQQU4nRh -YCEZx70Flzv7hvJRjJZckYIwHgIBDAIBAQQWFhQyMDE2LTA3LTIzVDA2OjIxOjEx -WjAeAgESAgEBBBYWFDIwMTMtMDgtMDFUMDc6MDA6MDBaMD0CAQYCAQEENbR21I+a -8+byMXo3NPRoDWQmSXQF2EcCeBoD4GaL//ZCRETp9rGFPSg1KekCP7Kr9HAqw09m -MEICAQcCAQEEOlVJozYYBdugybShbiiMsejDMNeCbZq6CrzGBwW6GBy+DGWxJI91 -Y3ouXN4TZUhuVvLvN1b0m5T3ggQwggFaAgERAgEBBIIBUDGCAUwwCwICBqwCAQEE -AhYAMAsCAgatAgEBBAIMADALAgIGsAIBAQQCFgAwCwICBrICAQEEAgwAMAsCAgaz -AgEBBAIMADALAgIGtAIBAQQCDAAwCwICBrUCAQEEAgwAMAsCAga2AgEBBAIMADAM -AgIGpQIBAQQDAgEBMAwCAgarAgEBBAMCAQEwDAICBq4CAQEEAwIBADAMAgIGrwIB -AQQDAgEAMAwCAgaxAgEBBAMCAQAwGwICBqcCAQEEEgwQMTAwMDAwMDIyNTMyNTkw -MTAbAgIGqQIBAQQSDBAxMDAwMDAwMjI1MzI1OTAxMB8CAgaoAgEBBBYWFDIwMTYt -MDctMjNUMDY6MjE6MTFaMB8CAgaqAgEBBBYWFDIwMTYtMDctMjNUMDY6MjE6MTFa -MCACAgamAgEBBBcMFWNvbS56aGlodS50ZXN0LnRlc3RfMaCCDmUwggV8MIIEZKAD -AgECAggO61eH554JjTANBgkqhkiG9w0BAQUFADCBljELMAkGA1UEBhMCVVMxEzAR -BgNVBAoMCkFwcGxlIEluYy4xLDAqBgNVBAsMI0FwcGxlIFdvcmxkd2lkZSBEZXZl -bG9wZXIgUmVsYXRpb25zMUQwQgYDVQQDDDtBcHBsZSBXb3JsZHdpZGUgRGV2ZWxv -cGVyIFJlbGF0aW9ucyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNTExMTMw -MjE1MDlaFw0yMzAyMDcyMTQ4NDdaMIGJMTcwNQYDVQQDDC5NYWMgQXBwIFN0b3Jl -IGFuZCBpVHVuZXMgU3RvcmUgUmVjZWlwdCBTaWduaW5nMSwwKgYDVQQLDCNBcHBs -ZSBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9uczETMBEGA1UECgwKQXBwbGUg -SW5jLjELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQClz4H9JaKBW9aH7SPaMxyO4iPApcQmyz3Gn+xKDVWG/6QC15fKOVRtfX+yVBid -xCxScY5ke4LOibpJ1gjltIhxzz9bRi7GxB24A6lYogQ+IXjV27fQjhKNg0xbKmg3 -k8LyvR7E0qEMSlhSqxLj7d0fmBWQNS3CzBLKjUiB91h4VGvojDE2H0oGDEdU8zeQ -uLKSiX1fpIVK4cCc4Lqku4KXY/Qrk8H9Pm/KwfU8qY9SGsAlCnYO3v6Z/v/Ca/Vb -XqxzUUkIVonMQ5DMjoEC0KCXtlyxoWlph5AQaCYmObgdEHOwCl3Fc9DfdjvYLdmI -HuPsB8/ijtDT+iZVge/iA0kjAgMBAAGjggHXMIIB0zA/BggrBgEFBQcBAQQzMDEw -LwYIKwYBBQUHMAGGI2h0dHA6Ly9vY3NwLmFwcGxlLmNvbS9vY3NwMDMtd3dkcjA0 -MB0GA1UdDgQWBBSRpJz8xHa3n6CK9E31jzZd7SsEhTAMBgNVHRMBAf8EAjAAMB8G -A1UdIwQYMBaAFIgnFwmpthhgi+zruvZHWcVSVKO3MIIBHgYDVR0gBIIBFTCCAREw -ggENBgoqhkiG92NkBQYBMIH+MIHDBggrBgEFBQcCAjCBtgyBs1JlbGlhbmNlIG9u -IHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0YW5j -ZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBjb25k -aXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZpY2F0 -aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMDYGCCsGAQUFBwIBFipodHRwOi8vd3d3 -LmFwcGxlLmNvbS9jZXJ0aWZpY2F0ZWF1dGhvcml0eS8wDgYDVR0PAQH/BAQDAgeA 
-MBAGCiqGSIb3Y2QGCwEEAgUAMA0GCSqGSIb3DQEBBQUAA4IBAQANphvTLj3jWysH -bkKWbNPojEMwgl/gXNGNvr0PvRr8JZLbjIXDgFnf4+LXLgUUrA3btrj+/DUufMut -F2uOfx/kd7mxZ5W0E16mGYZ2+FogledjjA9z/Ojtxh+umfhlSFyg4Cg6wBA3Lbmg -BDkfc7nIBf3y3n8aKipuKwH8oCBc2et9J6Yz+PWY4L5E27FMZ/xuCk/J4gao0pfz -p45rUaJahHVl0RYEYuPBX/UIqc9o2ZIAycGMs/iNAGS6WGDAfK+PdcppuVsq1h1o -bphC9UynNxmbzDscehlD86Ntv0hgBgw2kivs3hi1EdotI9CO/KBpnBcbnoB7OUdF -MGEvxxOoMIIEIjCCAwqgAwIBAgIIAd68xDltoBAwDQYJKoZIhvcNAQEFBQAwYjEL -MAkGA1UEBhMCVVMxEzARBgNVBAoTCkFwcGxlIEluYy4xJjAkBgNVBAsTHUFwcGxl -IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRYwFAYDVQQDEw1BcHBsZSBSb290IENB -MB4XDTEzMDIwNzIxNDg0N1oXDTIzMDIwNzIxNDg0N1owgZYxCzAJBgNVBAYTAlVT -MRMwEQYDVQQKDApBcHBsZSBJbmMuMSwwKgYDVQQLDCNBcHBsZSBXb3JsZHdpZGUg -RGV2ZWxvcGVyIFJlbGF0aW9uczFEMEIGA1UEAww7QXBwbGUgV29ybGR3aWRlIERl -dmVsb3BlciBSZWxhdGlvbnMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKOFSmy1aqyCQ5SOmM7uxfuH8mkbw0 -U3rOfGOAYXdkXqUHI7Y5/lAtFVZYcC1+xG7BSoU+L/DehBqhV8mvexj/avoVEkkV -CBmsqtsqMu2WY2hSFT2Miuy/axiV4AOsAX2XBWfODoWVN2rtCbauZ81RZJ/GXNG8 -V25nNYB2NqSHgW44j9grFU57Jdhav06DwY3Sk9UacbVgnJ0zTlX5ElgMhrgWDcHl -d0WNUEi6Ky3klIXh6MSdxmilsKP8Z35wugJZS3dCkTm59c3hTO/AO0iMpuUhXf1q -arunFjVg0uat80YpyejDi+l5wGphZxWy8P3laLxiX27Pmd3vG2P+kmWrAgMBAAGj -gaYwgaMwHQYDVR0OBBYEFIgnFwmpthhgi+zruvZHWcVSVKO3MA8GA1UdEwEB/wQF -MAMBAf8wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wLgYDVR0fBCcw -JTAjoCGgH4YdaHR0cDovL2NybC5hcHBsZS5jb20vcm9vdC5jcmwwDgYDVR0PAQH/ -BAQDAgGGMBAGCiqGSIb3Y2QGAgEEAgUAMA0GCSqGSIb3DQEBBQUAA4IBAQBPz+9Z -viz1smwvj+4ThzLoBTWobot9yWkMudkXvHcs1Gfi/ZptOllc34MBvbKuKmFysa/N -w0Uwj6ODDc4dR7Txk4qjdJukw5hyhzs+r0ULklS5MruQGFNrCk4QttkdUGwhgAqJ -TleMa1s8Pab93vcNIx0LSiaHP7qRkkykGRIZbVf1eliHe2iK5IaMSuviSRSqpd1V -AKmuu0swruGgsbwpgOYJd+W+NKIByn/c4grmO7i77LpilfMFY0GCzQ87HUyVpNur -+cmV6U/kTecmmYHpvPm0KdIBembhLoz2IYrF+Hjhga6/05Cdqa3zr/04GpZnMBxR -pVzscYqCtGwPDBUfMIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQsw -CQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUg -Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0Ew -HhcNMDYwNDI1MjE0MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzET -MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv -biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne -+Uts9QerIjAC6Bg++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjcz -y8QPTc4UadHJGXL1XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQ -Z48ItCD3y6wsIG9wtj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCS -C7EhFi501TwN22IWq6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINB -hzOKgbEwWOxaBDKMaLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIB -djAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9Bp -R5R2Cf70a40uQKb3R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/ -CF4wggERBgNVHSAEggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcC -ARYeaHR0cHM6Ly93d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCB -thqBs1JlbGlhbmNlIG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFz -c3VtZXMgYWNjZXB0YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJk -IHRlcm1zIGFuZCBjb25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5 -IGFuZCBjZXJ0aWZpY2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3 -DQEBBQUAA4IBAQBcNplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizU -sZAS2L70c5vu0mQPy3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJ -fBdAVhEedNO3iyM7R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr -1KIkIxH3oayPc4FgxhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltk -wGMzd/c6ByxW69oPIQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIq 
-xw8dtk2cXmPIS4AXUKqK1drk/NAJBzewdXUhMYIByzCCAccCAQEwgaMwgZYxCzAJ -BgNVBAYTAlVTMRMwEQYDVQQKDApBcHBsZSBJbmMuMSwwKgYDVQQLDCNBcHBsZSBX -b3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9uczFEMEIGA1UEAww7QXBwbGUgV29y -bGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkCCA7rV4fnngmNMAkGBSsOAwIaBQAwDQYJKoZIhvcNAQEBBQAEggEAasPtnide -NWyfUtewW9OSgcQA8pW+5tWMR0469cBPZR84uJa0gyfmPspySvbNOAwnrwzZHYLa -ujOxZLip4DUw4F5s3QwUa3y4BXpF4J+NSn9XNvxNtnT/GcEQtCuFwgJ0o3F0ilhv -MTHrwiwyx/vr+uNDqlORK8lfK+1qNp+A/kzh8eszMrn4JSeTh9ZYxLHE56WkTQGD -VZXl0gKgxSOmDrcp1eQxdlymzrPv9U60wUJ0bkPfrU9qZj3mJrmrkQk61JTe3j6/ -QfjfFBG9JG2mUmYQP1KQ3SypGHzDW8vngvsGu//tNU0NFfOqQu4bYU4VpQl0nPtD -4B85NkrgvQsWAQ== ------END PKCS7-----` diff --git a/vendor/github.com/fullsailor/pkcs7/x509.go b/vendor/github.com/fullsailor/pkcs7/x509.go new file mode 100644 index 0000000000..195fd0e4bb --- /dev/null +++ b/vendor/github.com/fullsailor/pkcs7/x509.go @@ -0,0 +1,133 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the go/golang LICENSE file. + +package pkcs7 + +// These are private constants and functions from the crypto/x509 package that +// are useful when dealing with signatures verified by x509 certificates + +import ( + "bytes" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" +) + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. 
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3 +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. + Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +// asn1.NullBytes is not available prior to Go 1.9 +var nullBytes = []byte{5, 0} + +func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) x509.SignatureAlgorithm { + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. + + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return x509.UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return x509.UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces + // them into three buckets by requiring that the MGF1 hash + // function always match the message hash function (as + // recommended in + // https://tools.ietf.org/html/rfc3447#section-8.1), that the + // salt length matches the hash length, and that the trailer + // field has the default value. 
+ if !bytes.Equal(params.Hash.Parameters.FullBytes, nullBytes) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, nullBytes) || + params.TrailerField != 1 { + return x509.UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return x509.SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return x509.SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return x509.SHA512WithRSAPSS + } + + return x509.UnknownSignatureAlgorithm +} diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go deleted file mode 100644 index 505af45301..0000000000 --- a/vendor/github.com/ghodss/yaml/yaml_test.go +++ /dev/null @@ -1,287 +0,0 @@ -package yaml - -import ( - "fmt" - "math" - "reflect" - "strconv" - "testing" -) - -type MarshalTest struct { - A string - B int64 - // Would like to test float64, but it's not supported in go-yaml. - // (See https://github.com/go-yaml/yaml/issues/83.) - C float32 -} - -func TestMarshal(t *testing.T) { - f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32) - s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32} - e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String)) - - y, err := Marshal(s) - if err != nil { - t.Errorf("error marshaling YAML: %v", err) - } - - if !reflect.DeepEqual(y, e) { - t.Errorf("marshal YAML was unsuccessful, expected: %#v, got: %#v", - string(e), string(y)) - } -} - -type UnmarshalString struct { - A string - True string -} - -type UnmarshalStringMap struct { - A map[string]string -} - -type UnmarshalNestedString struct { - A NestedString -} - -type NestedString struct { - A string -} - -type UnmarshalSlice struct { - A []NestedSlice -} - -type NestedSlice struct { - B string - C *string -} - -func TestUnmarshal(t *testing.T) { - y := []byte("a: 1") - s1 := UnmarshalString{} - e1 := UnmarshalString{A: "1"} - unmarshal(t, y, &s1, &e1) - - y = []byte("a: true") - s1 = UnmarshalString{} - e1 = UnmarshalString{A: "true"} - unmarshal(t, y, &s1, &e1) - - y = []byte("true: 1") - s1 = UnmarshalString{} - e1 = UnmarshalString{True: "1"} - unmarshal(t, y, &s1, &e1) - - y = []byte("a:\n a: 1") - s2 := UnmarshalNestedString{} - e2 := UnmarshalNestedString{NestedString{"1"}} - unmarshal(t, y, &s2, &e2) - - y = []byte("a:\n - b: abc\n c: def\n - b: 123\n c: 456\n") - s3 := UnmarshalSlice{} - e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}} - unmarshal(t, y, &s3, &e3) - - y = []byte("a:\n b: 1") - s4 := UnmarshalStringMap{} - e4 := UnmarshalStringMap{map[string]string{"b": "1"}} - unmarshal(t, y, &s4, &e4) - - y = []byte(` -a: - name: TestA -b: - name: TestB -`) - type NamedThing struct { - Name string `json:"name"` - } - s5 := map[string]*NamedThing{} - e5 := map[string]*NamedThing{ - "a": &NamedThing{Name: "TestA"}, - "b": &NamedThing{Name: "TestB"}, - } - unmarshal(t, y, &s5, &e5) -} - -func unmarshal(t *testing.T, y []byte, s, e interface{}) { - err := Unmarshal(y, s) - if err != nil { - t.Errorf("error unmarshaling YAML: %v", err) - } - - if !reflect.DeepEqual(s, e) { - t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v", - e, s) - } -} - -type Case struct { - input string - output string - // By default we test that reversing the output == 
input. But if there is a - // difference in the reversed output, you can optionally specify it here. - reverse *string -} - -type RunType int - -const ( - RunTypeJSONToYAML RunType = iota - RunTypeYAMLToJSON -) - -func TestJSONToYAML(t *testing.T) { - cases := []Case{ - { - `{"t":"a"}`, - "t: a\n", - nil, - }, { - `{"t":null}`, - "t: null\n", - nil, - }, - } - - runCases(t, RunTypeJSONToYAML, cases) -} - -func TestYAMLToJSON(t *testing.T) { - cases := []Case{ - { - "t: a\n", - `{"t":"a"}`, - nil, - }, { - "t: \n", - `{"t":null}`, - strPtr("t: null\n"), - }, { - "t: null\n", - `{"t":null}`, - nil, - }, { - "1: a\n", - `{"1":"a"}`, - strPtr("\"1\": a\n"), - }, { - "1000000000000000000000000000000000000: a\n", - `{"1e+36":"a"}`, - strPtr("\"1e+36\": a\n"), - }, { - "1e+36: a\n", - `{"1e+36":"a"}`, - strPtr("\"1e+36\": a\n"), - }, { - "\"1e+36\": a\n", - `{"1e+36":"a"}`, - nil, - }, { - "\"1.2\": a\n", - `{"1.2":"a"}`, - nil, - }, { - "- t: a\n", - `[{"t":"a"}]`, - nil, - }, { - "- t: a\n" + - "- t:\n" + - " b: 1\n" + - " c: 2\n", - `[{"t":"a"},{"t":{"b":1,"c":2}}]`, - nil, - }, { - `[{t: a}, {t: {b: 1, c: 2}}]`, - `[{"t":"a"},{"t":{"b":1,"c":2}}]`, - strPtr("- t: a\n" + - "- t:\n" + - " b: 1\n" + - " c: 2\n"), - }, { - "- t: \n", - `[{"t":null}]`, - strPtr("- t: null\n"), - }, { - "- t: null\n", - `[{"t":null}]`, - nil, - }, - } - - // Cases that should produce errors. - _ = []Case{ - { - "~: a", - `{"null":"a"}`, - nil, - }, { - "a: !!binary gIGC\n", - "{\"a\":\"\x80\x81\x82\"}", - nil, - }, - } - - runCases(t, RunTypeYAMLToJSON, cases) -} - -func runCases(t *testing.T, runType RunType, cases []Case) { - var f func([]byte) ([]byte, error) - var invF func([]byte) ([]byte, error) - var msg string - var invMsg string - if runType == RunTypeJSONToYAML { - f = JSONToYAML - invF = YAMLToJSON - msg = "JSON to YAML" - invMsg = "YAML back to JSON" - } else { - f = YAMLToJSON - invF = JSONToYAML - msg = "YAML to JSON" - invMsg = "JSON back to YAML" - } - - for _, c := range cases { - // Convert the string. - t.Logf("converting %s\n", c.input) - output, err := f([]byte(c.input)) - if err != nil { - t.Errorf("Failed to convert %s, input: `%s`, err: %v", msg, c.input, err) - } - - // Check it against the expected output. - if string(output) != c.output { - t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`", - msg, c.input, c.output, string(output)) - } - - // Set the string that we will compare the reversed output to. - reverse := c.input - // If a special reverse string was specified, use that instead. - if c.reverse != nil { - reverse = *c.reverse - } - - // Reverse the output. - input, err := invF(output) - if err != nil { - t.Errorf("Failed to convert %s, input: `%s`, err: %v", invMsg, string(output), err) - } - - // Check the reverse is equal to the input (or to *c.reverse). - if string(input) != reverse { - t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`", - invMsg, string(output), reverse, string(input)) - } - } - -} - -// To be able to easily fill in the *Case.reverse string above. -func strPtr(s string) *string { - return &s -} diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
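A note on the `getSignatureAlgorithmFromAI` helper vendored in the pkcs7 `x509.go` hunk above: it and its OID table are package-private, so the sketch below only illustrates the shape of its non-PSS fast path — the function and variable names here are hypothetical re-creations, with one OID copied from the vendored table.

```go
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

// OID for sha256WithRSAEncryption, copied from the vendored table above.
var oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}

// lookupSignatureAlgorithm mirrors the non-PSS path: match the
// AlgorithmIdentifier's OID against a known table, otherwise report
// the algorithm as unknown.
func lookupSignatureAlgorithm(ai pkix.AlgorithmIdentifier) x509.SignatureAlgorithm {
	if ai.Algorithm.Equal(oidSignatureSHA256WithRSA) {
		return x509.SHA256WithRSA
	}
	return x509.UnknownSignatureAlgorithm
}

func main() {
	ai := pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA256WithRSA}
	fmt.Println(lookupSignatureAlgorithm(ai)) // prints "SHA256-RSA"
}
```

The RSA-PSS branch cannot take this shortcut: its OID is shared across hash choices, so the vendored code unmarshals the encoded PSS parameters, checks that the MGF1 hash matches the message hash and that the salt length and trailer field have their expected values, and only then buckets the result by hash.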
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 0000000000..26296d0240
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,36 @@
+# A more minimal logging API for Go
+
+Before you consider this package, please read [this blog post by the inimitable
+Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I
+really appreciate what he has to say, and it largely aligns with my own
+experiences. Too many choices of levels mean inconsistent logs.
+
+This package offers a purely abstract interface, based on these ideas but with
+a few twists. Code can depend on just this interface and have the actual
+logging implementation be injected from callers. Ideally only `main()` knows
+what logging implementation is being used.
+
+# Differences from Dave's ideas
+
+The main differences are:
+
+1) Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. I disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. I
+restrict the API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2) Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug". Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+This is a BETA grade API. I have implemented it for
+[glog](https://godoc.org/github.com/golang/glog). Until there is a significant
+2nd implementation, I don't really know how it will change.
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 0000000000..ad72e78869
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,151 @@
+// Package logr defines abstract interfaces for logging. Packages can depend on
+// these interfaces and callers can implement logging in whatever way is
+// appropriate.
+//
+// This design derives from Dave Cheney's blog:
+//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+//
+// This is a BETA grade API. Until there is a significant 2nd implementation,
+// I don't really know how it will change.
+//
+// The logging specifically makes it non-trivial to use format strings, to encourage
+// attaching structured information instead of unstructured format strings.
+//
+// Usage
+//
+// Logging is done using a Logger. Loggers can have name prefixes and named values
+// attached, so that all log messages logged with that Logger have some base context
+// associated.
+//
+// The term "key" is used to refer to the name associated with a particular value, to
+// disambiguate it from the general Logger name.
+//
+// For instance, suppose we're trying to reconcile the state of an object, and we want
+// to log that we've made some decision.
+//
+// With the traditional log package, we might write
+//   log.Printf(
+//       "decided to set field foo to value %q for object %s/%s",
+//       targetValue, object.Namespace, object.Name)
+//
+// With logr's structured logging, we'd write
+//   // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
+//   // and the named value target-type=Foo, for extra context.
+//   log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
+//
+//   // later on...
+//   log.Info("setting field foo on object", "value", targetValue, "object", object)
+//
+// Depending on our logging implementation, we could then make logging decisions based on field values
+// (like only logging such events for objects in a certain namespace), or copy the structured
+// information into a structured log store.
+//
+// For logging errors, Logger has a method called Error. Suppose we wanted to log an
+// error while reconciling. With the traditional log package, we might write
+//   log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
+//
+// With logr, we'd instead write
+//   // assuming the above setup for log
+//   log.Error(err, "unable to reconcile object", "object", object)
+//
+// This functions similarly to:
+//   log.Info("unable to reconcile object", "error", err, "object", object)
+//
+// However, it ensures that a standard key for the error value ("error") is used across all
+// error logging. Furthermore, certain implementations may choose to attach additional
+// information (such as stack traces) on calls to Error, so it's preferred to use Error
+// to log errors.
+//
+// Parts of a log line
+//
+// Each log message from a Logger has four types of context:
+// logger name, log verbosity, log message, and the named values.
+//
+// The Logger name consists of a series of name "segments" added by successive calls to WithName.
+// These name segments will be joined in some way by the underlying implementation. It is strongly
+// recommended that name segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the joining operation (e.g.
+// whitespace, commas, periods, slashes, brackets, quotes, etc).
+//
+// Log verbosity represents how little a log matters. Level zero, the default, matters most.
+// Increasing levels matter less and less. Try to avoid lots of different verbosity levels,
+// and instead provide useful keys, logger names, and log messages for users to filter on.
+// It's illegal to pass a log level below zero.
+//
+// The log message consists of a constant message attached to the log line. This
+// should generally be a simple description of what's occurring, and should never be a format string.
+//
+// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary
+// strings, while values may be any Go value.
+//
+// Key Naming Conventions
+//
+// While users are generally free to use key names of their choice, it's generally best to avoid
+// using the following keys, as they're frequently used by implementations:
+//
+// - `"error"`: the underlying error value in the `Error` method.
+// - `"stacktrace"`: the stack trace associated with a particular log line or error
+//   (often from the `Error` message).
+// - `"caller"`: the calling information (file/line) of a particular log line.
+// - `"msg"`: the log message.
+// - `"level"`: the log level.
+// - `"ts"`: the timestamp for a log line.
+//
+// Implementations are encouraged to make use of these keys to represent the above
+// concepts, when necessary (for example, in a pure-JSON output form, it would be
+// necessary to represent at least message and timestamp as ordinary named values).
+package logr

+// TODO: consider adding back in format strings if they're really needed
+// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
+// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats

+// InfoLogger represents the ability to log non-error messages, at a particular verbosity.
+type InfoLogger interface {
+	// Info logs a non-error message with the given key/value pairs as context.
+	//
+	// The msg argument should be used to add some constant description to
+	// the log line. The key/value pairs can then be used to add additional
+	// variable information. The key/value pairs should alternate string
+	// keys and arbitrary values.
+	Info(msg string, keysAndValues ...interface{})

+	// Enabled tests whether this InfoLogger is enabled. For example,
+	// commandline flags might be used to set the logging verbosity and disable
+	// some info logs.
+	Enabled() bool
+}

+// Logger represents the ability to log messages, both errors and not.
+type Logger interface {
+	// All Loggers implement InfoLogger. Calling InfoLogger methods directly on
+	// a Logger value is equivalent to calling them on a V(0) InfoLogger. For
+	// example, logger.Info() produces the same result as logger.V(0).Info.
+	InfoLogger

+	// Error logs an error, with the given message and key/value pairs as context.
+	// It functions similarly to calling Info with the "error" named value, but may
+	// have unique behavior, and should be preferred for logging errors (see the
+	// package documentation for more information).
+	//
+	// The msg field should be used to add context to any underlying error,
+	// while the err field should be used to attach the actual error that
+	// triggered this log line, if present.
+	Error(err error, msg string, keysAndValues ...interface{})

+	// V returns an InfoLogger value for a specific verbosity level. A higher
+	// verbosity level means a log message is less important. It's illegal to
+	// pass a log level less than zero.
+	V(level int) InfoLogger

+	// WithValues adds some key-value pairs of context to a logger.
+	// See Info for documentation on how key/value pairs work.
+	WithValues(keysAndValues ...interface{}) Logger

+	// WithName adds a new element to the logger's name.
+	// Successive calls to WithName continue to append
+	// suffixes to the logger's name. It's strongly recommended
+	// that name segments contain only letters, digits, and hyphens
+	// (see the package documentation for more information).
+	WithName(name string) Logger
+}
diff --git a/vendor/github.com/go-logr/zapr/.gitignore b/vendor/github.com/go-logr/zapr/.gitignore
new file mode 100644
index 0000000000..5ba77727f1
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/.gitignore
@@ -0,0 +1,3 @@
+*~
+*.swp
+/vendor
diff --git a/vendor/github.com/go-logr/zapr/Gopkg.lock b/vendor/github.com/go-logr/zapr/Gopkg.lock
new file mode 100644
index 0000000000..8da0a8f76c
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/Gopkg.lock
@@ -0,0 +1,52 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" + name = "github.com/go-logr/logr" + packages = ["."] + pruneopts = "UT" + revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + version = "v0.1.0" + +[[projects]] + digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "UT" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "UT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:9580b1b079114140ade8cec957685344d14f00119e0241f6b369633cb346eeb3" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "UT" + revision = "eeedf312bc6c57391d84767a4cd413f02a917974" + version = "v1.8.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/go-logr/logr", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/go-logr/zapr/Gopkg.toml b/vendor/github.com/go-logr/zapr/Gopkg.toml new file mode 100644 index 0000000000..ae475d72e0 --- /dev/null +++ b/vendor/github.com/go-logr/zapr/Gopkg.toml @@ -0,0 +1,38 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/go-logr/logr" + version = "0.1.0" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.8.0" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/go-logr/zapr/LICENSE b/vendor/github.com/go-logr/zapr/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/go-logr/zapr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
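The zapr README and source vendored below describe the verbosity mapping only in prose (`zapLevel = -1*logrLevel`). As a complement, here is a minimal sketch of what that inversion means for callers; it assumes the canonical import paths `github.com/go-logr/zapr` and `go.uber.org/zap` rather than the vendored ones.

```go
package main

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Enable the zap core down to level -2, so logr verbosity levels
	// 0 through 2 are all enabled (zapLevel = -1*logrLevel).
	cfg := zap.NewDevelopmentConfig()
	cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(-2))
	zapLog, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	log := zapr.NewLogger(zapLog)

	log.Info("always shown: plain Info logs at zap's InfoLevel")
	log.V(1).Info("shown: V(1) maps to zap's DebugLevel (-1)")
	log.V(2).Info("shown: V(2) maps to zap level -2")
	log.V(3).Info("suppressed: the core is not enabled at level -3")
}
```

Per the vendored implementation, `V(n)` hands back a shared no-op `InfoLogger` whenever the underlying core is not enabled at level `-n`, so suppressed verbosity levels cost almost nothing.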
diff --git a/vendor/github.com/go-logr/zapr/README.md b/vendor/github.com/go-logr/zapr/README.md
new file mode 100644
index 0000000000..8472875fa3
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/README.md
@@ -0,0 +1,45 @@
+Zapr :zap:
+==========
+
+A [logr](https://github.com/go-logr/logr) implementation using
+[Zap](https://go.uber.org/zap).
+
+Usage
+-----
+
+```go
+import (
+    "fmt"
+
+    "go.uber.org/zap"
+    "github.com/go-logr/logr"
+    "github.com/directxman12/zapr"
+)
+
+func main() {
+    var log logr.Logger
+
+    zapLog, err := zap.NewDevelopment()
+    if err != nil {
+        panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
+    }
+    log = zapr.NewLogger(zapLog)
+
+    log.Info("Logr in action!", "the answer", 42)
+}
+```
+
+Implementation Details
+----------------------
+
+For the most part, concepts in Zap correspond directly with those in logr.
+
+Unlike Zap, all fields *must* be in the form of sugared fields --
+it's illegal to pass a strongly-typed Zap field in a key position to any
+of the logging methods (`Log`, `Error`).
+
+Levels in logr correspond to custom debug levels in Zap. Any given level
+in logr is represented by its inverse in Zap (`zapLevel = -1*logrLevel`).
+
+For example `V(2)` is equivalent to log level -2 in Zap, while `V(1)` is
+equivalent to Zap's `DebugLevel`.
diff --git a/vendor/github.com/go-logr/zapr/zapr.go b/vendor/github.com/go-logr/zapr/zapr.go
new file mode 100644
index 0000000000..a9a10ae2e1
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/zapr.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Solly Ross
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package zapr defines an implementation of the github.com/go-logr/logr
+// interfaces built on top of Zap (go.uber.org/zap).
+//
+// Usage
+//
+// A new logr.Logger can be constructed from an existing zap.Logger using
+// the NewLogger function:
+//
+//  log := zapr.NewLogger(someZapLogger)
+//
+// Implementation Details
+//
+// For the most part, concepts in Zap correspond directly with those in
+// logr.
+//
+// Unlike Zap, all fields *must* be in the form of sugared fields --
+// it's illegal to pass a strongly-typed Zap field in a key position
+// to any of the log methods.
+//
+// Levels in logr correspond to custom debug levels in Zap. Any given level
+// in logr is represented by its inverse in zap (`zapLevel = -1*logrLevel`).
+// For example V(2) is equivalent to log level -2 in Zap, while V(1) is
+// equivalent to Zap's DebugLevel.
+package zapr
+
+import (
+	"github.com/go-logr/logr"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// noopInfoLogger is a logr.InfoLogger that's always disabled, and does nothing.
+type noopInfoLogger struct{}
+
+func (l *noopInfoLogger) Enabled() bool                   { return false }
+func (l *noopInfoLogger) Info(_ string, _ ...interface{}) {}
+
+var disabledInfoLogger = &noopInfoLogger{}
+
+// NB: right now, we always use the equivalent of sugared logging.
+// This is necessary, since logr doesn't define non-sugared types,
+// and using zap-specific non-sugared types would make users tied
+// directly to Zap.
+
+// infoLogger is a logr.InfoLogger that uses Zap to log at a particular
+// level. The level has already been converted to a Zap level, which
+// is to say that `logrLevel = -1*zapLevel`.
+type infoLogger struct {
+	lvl zapcore.Level
+	l   *zap.Logger
+}
+
+func (l *infoLogger) Enabled() bool { return true }
+func (l *infoLogger) Info(msg string, keysAndVals ...interface{}) {
+	if checkedEntry := l.l.Check(l.lvl, msg); checkedEntry != nil {
+		checkedEntry.Write(handleFields(l.l, keysAndVals)...)
+	}
+}
+
+// zapLogger is a logr.Logger that uses Zap to log.
+type zapLogger struct {
+	// NB: this looks very similar to zap.SugaredLogger, but
+	// deals with our desire to have multiple verbosity levels.
+	l *zap.Logger
+	infoLogger
+}
+
+// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes
+// additional pre-converted Zap fields, for use with automatically attached fields, like
+// `error`.
+func handleFields(l *zap.Logger, args []interface{}, additional ...zap.Field) []zap.Field {
+	// a slightly modified version of zap.SugaredLogger.sweetenFields
+	if len(args) == 0 {
+		// fast-return if we have no sugared fields.
+		return additional
+	}
+
+	// unlike Zap, we can be pretty sure users aren't passing structured
+	// fields (since logr has no concept of that), so guess that we need a
+	// little less space.
+	fields := make([]zap.Field, 0, len(args)/2+len(additional))
+	for i := 0; i < len(args); {
+		// check just in case for strongly-typed Zap fields, which is illegal (since
+		// it breaks implementation agnosticism), so we can give a better error message.
+		if _, ok := args[i].(zap.Field); ok {
+			l.DPanic("strongly-typed Zap Field passed to logr", zap.Any("zap field", args[i]))
+			break
+		}
+
+		// make sure this isn't a mismatched key
+		if i == len(args)-1 {
+			l.DPanic("odd number of arguments passed as key-value pairs for logging", zap.Any("ignored key", args[i]))
+			break
+		}
+
+		// process a key-value pair,
+		// ensuring that the key is a string
+		key, val := args[i], args[i+1]
+		keyStr, isString := key.(string)
+		if !isString {
+			// if the key isn't a string, DPanic and stop logging
+			l.DPanic("non-string key argument passed to logging, ignoring all later arguments", zap.Any("invalid key", key))
+			break
+		}
+
+		fields = append(fields, zap.Any(keyStr, val))
+		i += 2
+	}
+
+	return append(fields, additional...)
+}
+
+func (l *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) {
+	if checkedEntry := l.l.Check(zap.ErrorLevel, msg); checkedEntry != nil {
+		checkedEntry.Write(handleFields(l.l, keysAndVals, zap.Error(err))...)
+	}
+}
+
+func (l *zapLogger) V(level int) logr.InfoLogger {
+	lvl := zapcore.Level(-1 * level)
+	if l.l.Core().Enabled(lvl) {
+		return &infoLogger{
+			lvl: lvl,
+			l:   l.l,
+		}
+	}
+	return disabledInfoLogger
+}
+
+func (l *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger {
+	newLogger := l.l.With(handleFields(l.l, keysAndValues)...)
+	return NewLogger(newLogger)
+}
+
+func (l *zapLogger) WithName(name string) logr.Logger {
+	newLogger := l.l.Named(name)
+	return NewLogger(newLogger)
+}
+
+// NewLogger creates a new logr.Logger using the given Zap Logger to log.
+func NewLogger(l *zap.Logger) logr.Logger { + return &zapLogger{ + l: l, + infoLogger: infoLogger{ + l: l, + lvl: zap.InfoLevel, + }, + } +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index d0f383a26b..3436c4590c 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,13 +1,15 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.7 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/go-openapi/swag -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... -after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod new file mode 100644 index 0000000000..eb4d623c59 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -0,0 +1,10 @@ +module github.com/go-openapi/jsonpointer + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum new file mode 100644 index 0000000000..c71f4d7a29 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer_test.go b/vendor/github.com/go-openapi/jsonpointer/pointer_test.go deleted file mode 100644 index eabd586036..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer_test.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Automated tests on package. -// -// created 03-03-2013 - -package jsonpointer - -import ( - "encoding/json" - "fmt" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -const ( - TestDocumentNBItems = 11 - TestNodeObjNBItems = 4 - TestDocumentString = `{ -"foo": ["bar", "baz"], -"obj": { "a":1, "b":2, "c":[3,4], "d":[ {"e":9}, {"f":[50,51]} ] }, -"": 0, -"a/b": 1, -"c%d": 2, -"e^f": 3, -"g|h": 4, -"i\\j": 5, -"k\"l": 6, -" ": 7, -"m~n": 8 -}` -) - -var testDocumentJSON interface{} - -type testStructJSON struct { - Foo []string `json:"foo"` - Obj struct { - A int `json:"a"` - B int `json:"b"` - C []int `json:"c"` - D []struct { - E int `json:"e"` - F []int `json:"f"` - } `json:"d"` - } `json:"obj"` -} - -type aliasedMap map[string]interface{} - -var testStructJSONDoc testStructJSON -var testStructJSONPtr *testStructJSON - -func init() { - json.Unmarshal([]byte(TestDocumentString), &testDocumentJSON) - json.Unmarshal([]byte(TestDocumentString), &testStructJSONDoc) - testStructJSONPtr = &testStructJSONDoc -} - -func TestEscaping(t *testing.T) { - - ins := []string{`/`, `/`, `/a~1b`, `/a~1b`, `/c%d`, `/e^f`, `/g|h`, `/i\j`, `/k"l`, `/ `, `/m~0n`} - outs := []float64{0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8} - - for i := range ins { - p, err := New(ins[i]) - if assert.NoError(t, err, "input: %v", ins[i]) { - result, _, err := p.Get(testDocumentJSON) - if assert.NoError(t, err, "input: %v", ins[i]) { - assert.Equal(t, outs[i], result, "input: %v", ins[i]) - } - } - } - -} - -func TestFullDocument(t *testing.T) { - - in := `` - - p, err := New(in) - if err != nil { - t.Errorf("New(%v) error %v", in, err.Error()) - } - - result, _, err := p.Get(testDocumentJSON) - if err != nil { - t.Errorf("Get(%v) error %v", in, err.Error()) - } - - if len(result.(map[string]interface{})) != TestDocumentNBItems { - t.Errorf("Get(%v) = %v, expect full document", in, result) - } - - result, _, err = p.get(testDocumentJSON, nil) - if err != nil { - t.Errorf("Get(%v) error %v", in, err.Error()) - } - - if len(result.(map[string]interface{})) != TestDocumentNBItems { - t.Errorf("Get(%v) = %v, expect full document", in, result) - } -} - -func TestDecodedTokens(t *testing.T) { - p, err 
:= New("/obj/a~1b") - assert.NoError(t, err) - assert.Equal(t, []string{"obj", "a/b"}, p.DecodedTokens()) -} - -func TestIsEmpty(t *testing.T) { - p, err := New("") - assert.NoError(t, err) - assert.True(t, p.IsEmpty()) - p, err = New("/obj") - assert.NoError(t, err) - assert.False(t, p.IsEmpty()) -} - -func TestGetSingle(t *testing.T) { - in := `/obj` - - _, err := New(in) - assert.NoError(t, err) - result, _, err := GetForToken(testDocumentJSON, "obj") - assert.NoError(t, err) - assert.Len(t, result, TestNodeObjNBItems) - - result, _, err = GetForToken(testStructJSONDoc, "Obj") - assert.Error(t, err) - assert.Nil(t, result) - - result, _, err = GetForToken(testStructJSONDoc, "Obj2") - assert.Error(t, err) - assert.Nil(t, result) -} - -type pointableImpl struct { - a string -} - -func (p pointableImpl) JSONLookup(token string) (interface{}, error) { - if token == "some" { - return p.a, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -func TestPointableInterface(t *testing.T) { - p := &pointableImpl{"hello"} - - result, _, err := GetForToken(p, "some") - assert.NoError(t, err) - assert.Equal(t, p.a, result) - - result, _, err = GetForToken(p, "something") - assert.Error(t, err) - assert.Nil(t, result) -} - -func TestGetNode(t *testing.T) { - - in := `/obj` - - p, err := New(in) - assert.NoError(t, err) - result, _, err := p.Get(testDocumentJSON) - assert.NoError(t, err) - assert.Len(t, result, TestNodeObjNBItems) - - result, _, err = p.Get(aliasedMap(testDocumentJSON.(map[string]interface{}))) - assert.NoError(t, err) - assert.Len(t, result, TestNodeObjNBItems) - - result, _, err = p.Get(testStructJSONDoc) - assert.NoError(t, err) - assert.Equal(t, testStructJSONDoc.Obj, result) - - result, _, err = p.Get(testStructJSONPtr) - assert.NoError(t, err) - assert.Equal(t, testStructJSONDoc.Obj, result) -} - -func TestArray(t *testing.T) { - - ins := []string{`/foo/0`, `/foo/0`, `/foo/1`} - outs := []string{"bar", "bar", "baz"} - - for i := range ins { - p, err := New(ins[i]) - assert.NoError(t, err) - - result, _, err := p.Get(testStructJSONDoc) - assert.NoError(t, err) - assert.Equal(t, outs[i], result) - - result, _, err = p.Get(testStructJSONPtr) - assert.NoError(t, err) - assert.Equal(t, outs[i], result) - - result, _, err = p.Get(testDocumentJSON) - assert.NoError(t, err) - assert.Equal(t, outs[i], result) - } -} - -func TestOtherThings(t *testing.T) { - _, err := New("abc") - assert.Error(t, err) - - p, err := New("") - assert.NoError(t, err) - assert.Equal(t, "", p.String()) - - p, err = New("/obj/a") - assert.Equal(t, "/obj/a", p.String()) - - s := Escape("m~n") - assert.Equal(t, "m~0n", s) - s = Escape("m/n") - assert.Equal(t, "m~1n", s) - - p, err = New("/foo/3") - assert.NoError(t, err) - _, _, err = p.Get(testDocumentJSON) - assert.Error(t, err) - - p, err = New("/foo/a") - assert.NoError(t, err) - _, _, err = p.Get(testDocumentJSON) - assert.Error(t, err) - - p, err = New("/notthere") - assert.NoError(t, err) - _, _, err = p.Get(testDocumentJSON) - assert.Error(t, err) - - p, err = New("/invalid") - assert.NoError(t, err) - _, _, err = p.Get(1234) - assert.Error(t, err) - - p, err = New("/foo/1") - assert.NoError(t, err) - expected := "hello" - bbb := testDocumentJSON.(map[string]interface{})["foo"] - bbb.([]interface{})[1] = "hello" - - v, _, err := p.Get(testDocumentJSON) - assert.NoError(t, err) - assert.Equal(t, expected, v) - - esc := Escape("a/") - assert.Equal(t, "a~1", esc) - unesc := Unescape(esc) - assert.Equal(t, "a/", unesc) - - unesc = 
Unescape("~01") - assert.Equal(t, "~1", unesc) - assert.Equal(t, "~0~1", Escape("~/")) - assert.Equal(t, "~/", Unescape("~0~1")) -} - -func TestObject(t *testing.T) { - - ins := []string{`/obj/a`, `/obj/b`, `/obj/c/0`, `/obj/c/1`, `/obj/c/1`, `/obj/d/1/f/0`} - outs := []float64{1, 2, 3, 4, 4, 50} - - for i := range ins { - - p, err := New(ins[i]) - assert.NoError(t, err) - - result, _, err := p.Get(testDocumentJSON) - assert.NoError(t, err) - assert.Equal(t, outs[i], result) - - result, _, err = p.Get(testStructJSONDoc) - assert.NoError(t, err) - assert.EqualValues(t, outs[i], result) - - result, _, err = p.Get(testStructJSONPtr) - assert.NoError(t, err) - assert.EqualValues(t, outs[i], result) - } -} - -type setJsonDocEle struct { - B int `json:"b"` - C int `json:"c"` -} -type setJsonDoc struct { - A []struct { - B int `json:"b"` - C int `json:"c"` - } `json:"a"` - D int `json:"d"` -} - -type settableDoc struct { - Coll settableColl - Int settableInt -} - -func (s settableDoc) MarshalJSON() ([]byte, error) { - var res struct { - A settableColl `json:"a"` - D settableInt `json:"d"` - } - res.A = s.Coll - res.D = s.Int - return json.Marshal(res) -} -func (s *settableDoc) UnmarshalJSON(data []byte) error { - var res struct { - A settableColl `json:"a"` - D settableInt `json:"d"` - } - - if err := json.Unmarshal(data, &res); err != nil { - return err - } - s.Coll = res.A - s.Int = res.D - return nil -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s settableDoc) JSONLookup(token string) (interface{}, error) { - switch token { - case "a": - return &s.Coll, nil - case "d": - return &s.Int, nil - default: - return nil, fmt.Errorf("%s is not a known field", token) - } -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s *settableDoc) JSONSet(token string, data interface{}) error { - switch token { - case "a": - switch dt := data.(type) { - case settableColl: - s.Coll = dt - return nil - case *settableColl: - if dt != nil { - s.Coll = *dt - } else { - s.Coll = settableColl{} - } - return nil - case []settableCollItem: - s.Coll.Items = dt - return nil - } - case "d": - switch dt := data.(type) { - case settableInt: - s.Int = dt - return nil - case int: - s.Int.Value = dt - return nil - case int8: - s.Int.Value = int(dt) - return nil - case int16: - s.Int.Value = int(dt) - return nil - case int32: - s.Int.Value = int(dt) - return nil - case int64: - s.Int.Value = int(dt) - return nil - default: - return fmt.Errorf("invalid type %T for %s", data, token) - } - } - return fmt.Errorf("%s is not a known field", token) -} - -type settableColl struct { - Items []settableCollItem -} - -func (s settableColl) MarshalJSON() ([]byte, error) { - return json.Marshal(s.Items) -} -func (s *settableColl) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &s.Items) -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s settableColl) JSONLookup(token string) (interface{}, error) { - if tok, err := strconv.Atoi(token); err == nil { - return &s.Items[tok], nil - } - return nil, fmt.Errorf("%s is not a valid index", token) -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s *settableColl) JSONSet(token string, data interface{}) error { - if _, err := strconv.Atoi(token); err == nil { - _, err := SetForToken(s.Items, token, data) - return err - } - return fmt.Errorf("%s is not a valid index", token) -} - -type settableCollItem struct { - B int `json:"b"` - C int `json:"c"` -} - 
-type settableInt struct { - Value int -} - -func (s settableInt) MarshalJSON() ([]byte, error) { - return json.Marshal(s.Value) -} -func (s *settableInt) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &s.Value) -} - -func TestSetNode(t *testing.T) { - - jsonText := `{"a":[{"b": 1, "c": 2}], "d": 3}` - - var jsonDocument interface{} - if assert.NoError(t, json.Unmarshal([]byte(jsonText), &jsonDocument)) { - in := "/a/0/c" - p, err := New(in) - if assert.NoError(t, err) { - - _, err = p.Set(jsonDocument, 999) - assert.NoError(t, err) - - firstNode := jsonDocument.(map[string]interface{}) - assert.Len(t, firstNode, 2) - - sliceNode := firstNode["a"].([]interface{}) - assert.Len(t, sliceNode, 1) - - changedNode := sliceNode[0].(map[string]interface{}) - chNodeVI := changedNode["c"] - if assert.IsType(t, 0, chNodeVI) { - changedNodeValue := chNodeVI.(int) - if assert.Equal(t, 999, changedNodeValue) { - assert.Len(t, sliceNode, 1) - } - } - } - - v, err := New("/a/0") - if assert.NoError(t, err) { - _, err = v.Set(jsonDocument, map[string]interface{}{"b": 3, "c": 8}) - if assert.NoError(t, err) { - firstNode := jsonDocument.(map[string]interface{}) - assert.Len(t, firstNode, 2) - - sliceNode := firstNode["a"].([]interface{}) - assert.Len(t, sliceNode, 1) - changedNode := sliceNode[0].(map[string]interface{}) - assert.Equal(t, 3, changedNode["b"]) - assert.Equal(t, 8, changedNode["c"]) - } - } - } - - var structDoc setJsonDoc - if assert.NoError(t, json.Unmarshal([]byte(jsonText), &structDoc)) { - g, err := New("/a") - if assert.NoError(t, err) { - _, err = g.Set(&structDoc, []struct { - B int `json:"b"` - C int `json:"c"` - }{{B: 4, C: 7}}) - - if assert.NoError(t, err) { - assert.Len(t, structDoc.A, 1) - changedNode := structDoc.A[0] - assert.Equal(t, 4, changedNode.B) - assert.Equal(t, 7, changedNode.C) - } - } - - v, err := New("/a/0") - if assert.NoError(t, err) { - _, err = v.Set(structDoc, struct { - B int `json:"b"` - C int `json:"c"` - }{B: 3, C: 8}) - - if assert.NoError(t, err) { - assert.Len(t, structDoc.A, 1) - changedNode := structDoc.A[0] - assert.Equal(t, 3, changedNode.B) - assert.Equal(t, 8, changedNode.C) - } - } - - p, err := New("/a/0/c") - if assert.NoError(t, err) { - _, err = p.Set(&structDoc, 999) - assert.NoError(t, err) - if assert.Len(t, structDoc.A, 1) { - assert.Equal(t, 999, structDoc.A[0].C) - } - } - } - - var setDoc settableDoc - if assert.NoError(t, json.Unmarshal([]byte(jsonText), &setDoc)) { - g, err := New("/a") - if assert.NoError(t, err) { - _, err = g.Set(&setDoc, []settableCollItem{{B: 4, C: 7}}) - - if assert.NoError(t, err) { - assert.Len(t, setDoc.Coll.Items, 1) - changedNode := setDoc.Coll.Items[0] - assert.Equal(t, 4, changedNode.B) - assert.Equal(t, 7, changedNode.C) - } - } - - v, err := New("/a/0") - if assert.NoError(t, err) { - _, err = v.Set(setDoc, settableCollItem{B: 3, C: 8}) - - if assert.NoError(t, err) { - assert.Len(t, setDoc.Coll.Items, 1) - changedNode := setDoc.Coll.Items[0] - assert.Equal(t, 3, changedNode.B) - assert.Equal(t, 8, changedNode.C) - } - } - - p, err := New("/a/0/c") - if assert.NoError(t, err) { - _, err = p.Set(setDoc, 999) - assert.NoError(t, err) - if assert.Len(t, setDoc.Coll.Items, 1) { - assert.Equal(t, 999, setDoc.Coll.Items[0].C) - } - } - } -} diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 5b31a1b3e1..40034d28dc 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ 
b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,14 +1,16 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.7 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/PuerkitoBio/purell - go get -u github.com/go-openapi/jsonpointer -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... -after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod new file mode 100644 index 0000000000..6d15a70503 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -0,0 +1,15 @@ +module github.com/go-openapi/jsonreference + +require ( + github.com/PuerkitoBio/purell v1.1.0 + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/swag v0.17.0 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum new file mode 100644 index 0000000000..ec9bdbc286 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -0,0 +1,20 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag 
v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/reference_test.go b/vendor/github.com/go-openapi/jsonreference/reference_test.go deleted file mode 100644 index 499c634c58..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/reference_test.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Automated tests on package. 
-// -// created 03-03-2013 - -package jsonreference - -import ( - "testing" - - "github.com/go-openapi/jsonpointer" - "github.com/stretchr/testify/assert" -) - -func TestIsRoot(t *testing.T) { - in := "#" - r1, err := New(in) - assert.NoError(t, err) - assert.True(t, r1.IsRoot()) - - in = "#/ok" - r1 = MustCreateRef(in) - assert.False(t, r1.IsRoot()) - - assert.Panics(t, assert.PanicTestFunc(func() { - MustCreateRef("%2") - })) -} - -func TestFull(t *testing.T) { - - in := "http://host/path/a/b/c#/f/a/b" - - r1, err := New(in) - if err != nil { - t.Errorf("New(%v) error %s", in, err.Error()) - } - - if in != r1.String() { - t.Errorf("New(%v) = %v, expect %v", in, r1.String(), in) - } - - if r1.HasFragmentOnly != false { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) - } - - if r1.HasFullURL != true { - t.Errorf("New(%v)::HasFullURL %v expect %v", in, r1.HasFullURL, true) - } - - if r1.HasURLPathOnly != false { - t.Errorf("New(%v)::HasURLPathOnly %v expect %v", in, r1.HasURLPathOnly, false) - } - - if r1.HasFileScheme != false { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) - } - - if r1.GetPointer().String() != "/f/a/b" { - t.Errorf("New(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "/f/a/b") - } -} - -func TestFullURL(t *testing.T) { - - in := "http://host/path/a/b/c" - - r1, err := New(in) - if err != nil { - t.Errorf("New(%v) error %s", in, err.Error()) - } - - if in != r1.String() { - t.Errorf("New(%v) = %v, expect %v", in, r1.String(), in) - } - - if r1.HasFragmentOnly != false { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) - } - - if r1.HasFullURL != true { - t.Errorf("New(%v)::HasFullURL %v expect %v", in, r1.HasFullURL, true) - } - - if r1.HasURLPathOnly != false { - t.Errorf("New(%v)::HasURLPathOnly %v expect %v", in, r1.HasURLPathOnly, false) - } - - if r1.HasFileScheme != false { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) - } - - if r1.GetPointer().String() != "" { - t.Errorf("New(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") - } -} - -func TestFragmentOnly(t *testing.T) { - - in := "#/fragment/only" - - r1, err := New(in) - if err != nil { - t.Errorf("New(%v) error %s", in, err.Error()) - } - - if in != r1.String() { - t.Errorf("New(%v) = %v, expect %v", in, r1.String(), in) - } - - if r1.HasFragmentOnly != true { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, true) - } - - if r1.HasFullURL != false { - t.Errorf("New(%v)::HasFullURL %v expect %v", in, r1.HasFullURL, false) - } - - if r1.HasURLPathOnly != false { - t.Errorf("New(%v)::HasURLPathOnly %v expect %v", in, r1.HasURLPathOnly, false) - } - - if r1.HasFileScheme != false { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) - } - - if r1.GetPointer().String() != "/fragment/only" { - t.Errorf("New(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "/fragment/only") - } - - p, _ := jsonpointer.New(r1.referenceURL.Fragment) - r2 := Ref{referencePointer: p, HasFragmentOnly: true} - assert.Equal(t, r2.String(), in) - - r3 := Ref{referencePointer: p, HasFragmentOnly: false} - assert.Equal(t, r3.String(), in[1:]) -} - -func TestURLPathOnly(t *testing.T) { - - in := "/documents/document.json" - - r1, err := New(in) - if err != nil { - t.Errorf("New(%v) error %s", in, err.Error()) - } - - if in != r1.String() { - t.Errorf("New(%v) = %v, expect %v", in, r1.String(), in) 
- } - - if r1.HasFragmentOnly != false { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) - } - - if r1.HasFullURL != false { - t.Errorf("New(%v)::HasFullURL %v expect %v", in, r1.HasFullURL, false) - } - - if r1.HasURLPathOnly != true { - t.Errorf("New(%v)::HasURLPathOnly %v expect %v", in, r1.HasURLPathOnly, true) - } - - if r1.HasFileScheme != false { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) - } - - if r1.GetPointer().String() != "" { - t.Errorf("New(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") - } -} - -func TestURLRelativePathOnly(t *testing.T) { - - in := "document.json" - - r1, err := New(in) - if err != nil { - t.Errorf("New(%v) error %s", in, err.Error()) - } - - if in != r1.String() { - t.Errorf("New(%v) = %v, expect %v", in, r1.String(), in) - } - - if r1.HasFragmentOnly != false { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) - } - - if r1.HasFullURL != false { - t.Errorf("New(%v)::HasFullURL %v expect %v", in, r1.HasFullURL, false) - } - - if r1.HasURLPathOnly != true { - t.Errorf("New(%v)::HasURLPathOnly %v expect %v", in, r1.HasURLPathOnly, true) - } - - if r1.HasFileScheme != false { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) - } - - if r1.GetPointer().String() != "" { - t.Errorf("New(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") - } -} - -func TestInheritsInValid(t *testing.T) { - in1 := "http://www.test.com/doc.json" - in2 := "#/a/b" - - r1, _ := New(in1) - r2 := Ref{} - result, err := r1.Inherits(r2) - assert.Error(t, err) - assert.Nil(t, result) - - r1 = Ref{} - r2, _ = New(in2) - result, err = r1.Inherits(r2) - assert.NoError(t, err) - assert.Equal(t, r2, *result) -} - -func TestInheritsValid(t *testing.T) { - - in1 := "http://www.test.com/doc.json" - in2 := "#/a/b" - out := in1 + in2 - - r1, _ := New(in1) - r2, _ := New(in2) - - result, err := r1.Inherits(r2) - if err != nil { - t.Errorf("Inherits(%s,%s) error %s", r1.String(), r2.String(), err.Error()) - } - - if result.String() != out { - t.Errorf("Inherits(%s,%s) = %s, expect %s", r1.String(), r2.String(), result.String(), out) - } - - if result.GetPointer().String() != "/a/b" { - t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "/a/b") - } -} - -func TestInheritsDifferentHost(t *testing.T) { - - in1 := "http://www.test.com/doc.json" - in2 := "http://www.test2.com/doc.json#bla" - - r1, _ := New(in1) - r2, _ := New(in2) - - result, err := r1.Inherits(r2) - - if err != nil { - t.Errorf("Inherits(%s,%s) should not fail. 
Error: %s", r1.String(), r2.String(), err.Error()) - } - - if result.String() != in2 { - t.Errorf("Inherits(%s,%s) should be %s but is %s", in1, in2, in2, result) - } - - if result.GetPointer().String() != "" { - t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "") - } -} - -func TestFileScheme(t *testing.T) { - - in1 := "file:///Users/mac/1.json#a" - in2 := "file:///Users/mac/2.json#b" - - r1, _ := New(in1) - r2, _ := New(in2) - - if r1.HasFragmentOnly != false { - t.Errorf("New(%v)::HasFragmentOnly %v expect %v", in1, r1.HasFragmentOnly, false) - } - - if r1.HasFileScheme != true { - t.Errorf("New(%v)::HasFileScheme %v expect %v", in1, r1.HasFileScheme, true) - } - - if r1.HasFullFilePath != true { - t.Errorf("New(%v)::HasFullFilePath %v expect %v", in1, r1.HasFullFilePath, true) - } - - if r1.IsCanonical() != true { - t.Errorf("New(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical, true) - } - - result, err := r1.Inherits(r2) - if err != nil { - t.Errorf("Inherits(%s,%s) should not fail. Error: %s", r1.String(), r2.String(), err.Error()) - } - if result.String() != in2 { - t.Errorf("Inherits(%s,%s) should be %s but is %s", in1, in2, in2, result) - } - - if result.GetPointer().String() != "" { - t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "") - } -} - -func TestReferenceResolution(t *testing.T) { - - // 5.4. Reference Resolution Examples - // http://tools.ietf.org/html/rfc3986#section-5.4 - - base := "http://a/b/c/d;p?q" - baseRef, err := New(base) - - if err != nil { - t.Errorf("New(%s) failed error: %s", base, err.Error()) - } - if baseRef.String() != base { - t.Errorf("New(%s) %s expected %s", base, baseRef.String(), base) - } - - checks := []string{ - // 5.4.1. Normal Examples - // http://tools.ietf.org/html/rfc3986#section-5.4.1 - - "g:h", "g:h", - "g", "http://a/b/c/g", - "./g", "http://a/b/c/g", - "g/", "http://a/b/c/g/", - "/g", "http://a/g", - "//g", "http://g", - "?y", "http://a/b/c/d;p?y", - "g?y", "http://a/b/c/g?y", - "#s", "http://a/b/c/d;p?q#s", - "g#s", "http://a/b/c/g#s", - "g?y#s", "http://a/b/c/g?y#s", - ";x", "http://a/b/c/;x", - "g;x", "http://a/b/c/g;x", - "g;x?y#s", "http://a/b/c/g;x?y#s", - "", "http://a/b/c/d;p?q", - ".", "http://a/b/c/", - "./", "http://a/b/c/", - "..", "http://a/b/", - "../", "http://a/b/", - "../g", "http://a/b/g", - "../..", "http://a/", - "../../", "http://a/", - "../../g", "http://a/g", - - // 5.4.2. 
Abnormal Examples - // http://tools.ietf.org/html/rfc3986#section-5.4.2 - - "../../../g", "http://a/g", - "../../../../g", "http://a/g", - - "/./g", "http://a/g", - "/../g", "http://a/g", - "g.", "http://a/b/c/g.", - ".g", "http://a/b/c/.g", - "g..", "http://a/b/c/g..", - "..g", "http://a/b/c/..g", - - "./../g", "http://a/b/g", - "./g/.", "http://a/b/c/g/", - "g/./h", "http://a/b/c/g/h", - "g/../h", "http://a/b/c/h", - "g;x=1/./y", "http://a/b/c/g;x=1/y", - "g;x=1/../y", "http://a/b/c/y", - - "g?y/./x", "http://a/b/c/g?y/./x", - "g?y/../x", "http://a/b/c/g?y/../x", - "g#s/./x", "http://a/b/c/g#s/./x", - "g#s/../x", "http://a/b/c/g#s/../x", - - "http:g", "http:g", // for strict parsers - //"http:g", "http://a/b/c/g", // for backward compatibility - - } - for i := 0; i < len(checks); i += 2 { - child := checks[i] - expected := checks[i+1] - // fmt.Printf("%d: %v -> %v\n", i/2, child, expected) - - childRef, e := New(child) - if e != nil { - t.Errorf("%d: New(%s) failed error: %s", i/2, child, e.Error()) - } - - res, e := baseRef.Inherits(childRef) - if res == nil { - t.Errorf("%d: Inherits(%s, %s) nil not expected", i/2, base, child) - } - if e != nil { - t.Errorf("%d: Inherits(%s) failed error: %s", i/2, child, e.Error()) - } - if res.String() != expected { - t.Errorf("%d: Inherits(%s, %s) %s expected %s", i/2, base, child, res.String(), expected) - } - } -} diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 0000000000..00277082fb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,23 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml index ea80ef2a71..a4f03484b7 100644 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -1,16 +1,18 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.7 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/go-openapi/swag - go get -u gopkg.in/yaml.v2 - go get -u github.com/go-openapi/jsonpointer - go get -u github.com/go-openapi/jsonreference -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
-after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 1d1622082f..6354742cbf 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,5 +1,10 @@ # OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) -The object model for OpenAPI specification documents +The object model for OpenAPI specification documents. + +Currently supports Swagger 2.0. diff --git a/vendor/github.com/go-openapi/spec/auth_test.go b/vendor/github.com/go-openapi/spec/auth_test.go deleted file mode 100644 index 5449fdec90..0000000000 --- a/vendor/github.com/go-openapi/spec/auth_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "testing" -) - -func TestSerialization_AuthSerialization(t *testing.T) { - assertSerializeJSON(t, BasicAuth(), `{"type":"basic"}`) - - assertSerializeJSON(t, APIKeyAuth("api-key", "header"), `{"type":"apiKey","name":"api-key","in":"header"}`) - - assertSerializeJSON( - t, - OAuth2Implicit("http://foo.com/authorization"), - `{"type":"oauth2","flow":"implicit","authorizationUrl":"http://foo.com/authorization"}`) - - assertSerializeJSON( - t, - OAuth2Password("http://foo.com/token"), - `{"type":"oauth2","flow":"password","tokenUrl":"http://foo.com/token"}`) - - assertSerializeJSON(t, - OAuth2Application("http://foo.com/token"), - `{"type":"oauth2","flow":"application","tokenUrl":"http://foo.com/token"}`) - - assertSerializeJSON( - t, - OAuth2AccessToken("http://foo.com/authorization", "http://foo.com/token"), - `{"type":"oauth2","flow":"accessCode","authorizationUrl":"http://foo.com/authorization","tokenUrl":"http://foo.com/token"}`) - - auth1 := OAuth2Implicit("http://foo.com/authorization") - auth1.AddScope("email", "read your email") - assertSerializeJSON( - t, - auth1, - `{"type":"oauth2","flow":"implicit","authorizationUrl":"http://foo.com/authorization","scopes":{"email":"read your email"}}`) - - auth2 := OAuth2Password("http://foo.com/authorization") - auth2.AddScope("email", "read your email") - assertSerializeJSON( - t, - auth2, - `{"type":"oauth2","flow":"password","tokenUrl":"http://foo.com/authorization","scopes":{"email":"read your email"}}`) - - auth3 := OAuth2Application("http://foo.com/token") - auth3.AddScope("email", "read your email") - assertSerializeJSON( - t, - auth3, - `{"type":"oauth2","flow":"application","tokenUrl":"http://foo.com/token","scopes":{"email":"read your email"}}`) - - auth4 := OAuth2AccessToken("http://foo.com/authorization", "http://foo.com/token") - auth4.AddScope("email", "read your email") - assertSerializeJSON( - t, - auth4, - `{"type":"oauth2","flow":"accessCode","authorizationUrl":"http://foo.com/authorization","tokenUrl":"http://foo.com/token","scopes":{"email":"read your email"}}`) -} - -func TestSerialization_AuthDeserialization(t *testing.T) { - - assertParsesJSON(t, `{"type":"basic"}`, BasicAuth()) - - assertParsesJSON( - t, - `{"in":"header","name":"api-key","type":"apiKey"}`, - APIKeyAuth("api-key", "header")) - - assertParsesJSON( - t, - `{"authorizationUrl":"http://foo.com/authorization","flow":"implicit","type":"oauth2"}`, - OAuth2Implicit("http://foo.com/authorization")) - - assertParsesJSON( - t, - `{"flow":"password","tokenUrl":"http://foo.com/token","type":"oauth2"}`, - OAuth2Password("http://foo.com/token")) - - assertParsesJSON( - t, - `{"flow":"application","tokenUrl":"http://foo.com/token","type":"oauth2"}`, - OAuth2Application("http://foo.com/token")) - - assertParsesJSON( - t, - `{"authorizationUrl":"http://foo.com/authorization","flow":"accessCode","tokenUrl":"http://foo.com/token","type":"oauth2"}`, - OAuth2AccessToken("http://foo.com/authorization", "http://foo.com/token")) - - auth1 := OAuth2Implicit("http://foo.com/authorization") - auth1.AddScope("email", "read your email") - assertParsesJSON(t, - `{"authorizationUrl":"http://foo.com/authorization","flow":"implicit","scopes":{"email":"read your email"},"type":"oauth2"}`, - auth1) - - auth2 := OAuth2Password("http://foo.com/token") - auth2.AddScope("email", "read your email") - assertParsesJSON(t, - `{"flow":"password","scopes":{"email":"read your email"},"tokenUrl":"http://foo.com/token","type":"oauth2"}`, - auth2) - - auth3 := 
OAuth2Application("http://foo.com/token") - auth3.AddScope("email", "read your email") - assertParsesJSON(t, - `{"flow":"application","scopes":{"email":"read your email"},"tokenUrl":"http://foo.com/token","type":"oauth2"}`, - auth3) - - auth4 := OAuth2AccessToken("http://foo.com/authorization", "http://foo.com/token") - auth4.AddScope("email", "read your email") - assertParsesJSON( - t, - `{"authorizationUrl":"http://foo.com/authorization","flow":"accessCode","scopes":{"email":"read your email"},"tokenUrl":"http://foo.com/token","type":"oauth2"}`, - auth4) - -} diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 9afb5df194..d5ec7b900a 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -1,14 +1,14 @@ -// Code generated by go-bindata. +// Code generated by go-bindata. DO NOT EDIT. // sources: -// schemas/jsonschema-draft-04.json -// schemas/v2/schema.json -// DO NOT EDIT! +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.249kB) package spec import ( "bytes" "compress/gzip" + "crypto/sha256" "fmt" "io" "io/ioutil" @@ -39,8 +39,9 @@ func bindataRead(data []byte, name string) ([]byte, error) { } type asset struct { - bytes []byte - info os.FileInfo + bytes []byte + info os.FileInfo + digest [sha256.Size]byte } type bindataFileInfo struct { @@ -69,7 +70,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\
xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc
1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -84,12 +85,12 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} - a := &asset{bytes: bytes, info: info} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} return a, nil } -var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\
x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc
\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x5
2\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\x
be\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\
xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21
\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7
a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\x
fb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -104,8 +105,8 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} - a := &asset{bytes: bytes, info: info} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}} return a, nil } @@ -113,8 +114,8 @@ func v2SchemaJSON() (*asset, error) { // It returns an error if the asset could not be found or // could not be loaded. func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) @@ -124,6 +125,12 @@ func Asset(name string) ([]byte, error) { return nil, fmt.Errorf("Asset %s not found", name) } +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + // MustAsset is like Asset but panics when Asset would return an error. // It simplifies safe initialization of global variables. func MustAsset(name string) []byte { @@ -135,12 +142,18 @@ func MustAsset(name string) []byte { return a } +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. 
 func AssetInfo(name string) (os.FileInfo, error) {
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[cannonicalName]; ok {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
 		a, err := f()
 		if err != nil {
 			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
@@ -150,6 +163,33 @@ func AssetInfo(name string) (os.FileInfo, error) {
 	return nil, fmt.Errorf("AssetInfo %s not found", name)
 }
 
+// AssetDigest returns the digest of the file with the given name. It returns an
+// error if the asset could not be found or the digest could not be loaded.
+func AssetDigest(name string) ([sha256.Size]byte, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
+		}
+		return a.digest, nil
+	}
+	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
+}
+
+// Digests returns a map of all known files and their checksums.
+func Digests() (map[string][sha256.Size]byte, error) {
+	mp := make(map[string][sha256.Size]byte, len(_bindata))
+	for name := range _bindata {
+		a, err := _bindata[name]()
+		if err != nil {
+			return nil, err
+		}
+		mp[name] = a.digest
+	}
+	return mp, nil
+}
+
 // AssetNames returns the names of the assets.
 func AssetNames() []string {
 	names := make([]string, 0, len(_bindata))
@@ -162,6 +202,7 @@ func AssetNames() []string {
 // _bindata is a table, holding each asset generator, mapped to its name.
 var _bindata = map[string]func() (*asset, error){
 	"jsonschema-draft-04.json": jsonschemaDraft04JSON,
+	"v2/schema.json": v2SchemaJSON,
 }
@@ -174,15 +215,15 @@ var _bindata = map[string]func() (*asset, error){
 // img/
 //   a.png
 //   b.png
-// then AssetDir("data") would return []string{"foo.txt", "img"}
-// AssetDir("data/img") would return []string{"a.png", "b.png"}
-// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// then AssetDir("data") would return []string{"foo.txt", "img"},
+// AssetDir("data/img") would return []string{"a.png", "b.png"},
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
 // AssetDir("") will return []string{"data"}.
 func AssetDir(name string) ([]string, error) {
 	node := _bintree
 	if len(name) != 0 {
-		cannonicalName := strings.Replace(name, "\\", "/", -1)
-		pathList := strings.Split(cannonicalName, "/")
+		canonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(canonicalName, "/")
 		for _, p := range pathList {
 			node = node.Children[p]
 			if node == nil {
@@ -204,6 +245,7 @@ type bintree struct {
 	Func     func() (*asset, error)
 	Children map[string]*bintree
 }
+
 var _bintree = &bintree{nil, map[string]*bintree{
 	"jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}},
 	"v2": &bintree{nil, map[string]*bintree{
@@ -211,7 +253,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
 	}},
 }}
 
-// RestoreAsset restores an asset under the given directory
+// RestoreAsset restores an asset under the given directory.
 func RestoreAsset(dir, name string) error {
 	data, err := Asset(name)
 	if err != nil {
@@ -229,14 +271,10 @@ func RestoreAsset(dir, name string) error {
 	if err != nil {
 		return err
 	}
-	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
-	if err != nil {
-		return err
-	}
-	return nil
+	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
 }
 
-// RestoreAssets restores an asset under the given directory recursively
+// RestoreAssets restores an asset under the given directory recursively.
 func RestoreAssets(dir, name string) error {
 	children, err := AssetDir(name)
 	// File
@@ -254,7 +292,6 @@ func RestoreAssets(dir, name string) error {
 }
 
 func _filePath(dir, name string) string {
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
-	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
 }
-
diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go
new file mode 100644
index 0000000000..3fada0daef
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/cache.go
@@ -0,0 +1,60 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import "sync"
+
+// ResolutionCache is a cache for resolving URLs
+type ResolutionCache interface {
+	Get(string) (interface{}, bool)
+	Set(string, interface{})
+}
+
+type simpleCache struct {
+	lock  sync.RWMutex
+	store map[string]interface{}
+}
+
+// Get retrieves a cached URI
+func (s *simpleCache) Get(uri string) (interface{}, bool) {
+	debugLog("getting %q from resolution cache", uri)
+	s.lock.RLock()
+	v, ok := s.store[uri]
+	debugLog("got %q from resolution cache: %t", uri, ok)
+
+	s.lock.RUnlock()
+	return v, ok
+}
+
+// Set caches a URI
+func (s *simpleCache) Set(uri string, data interface{}) {
+	s.lock.Lock()
+	s.store[uri] = data
+	s.lock.Unlock()
+}
+
+var resCache ResolutionCache
+
+func init() {
+	resCache = initResolutionCache()
+}
+
+// initResolutionCache initializes the URI resolution cache
+func initResolutionCache() ResolutionCache {
+	return &simpleCache{store: map[string]interface{}{
+		"http://swagger.io/v2/schema.json":       MustLoadSwagger20Schema(),
+		"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
+	}}
+}
diff --git a/vendor/github.com/go-openapi/spec/contact_info_test.go b/vendor/github.com/go-openapi/spec/contact_info_test.go
deleted file mode 100644
index 5e644d0f0e..0000000000
--- a/vendor/github.com/go-openapi/spec/contact_info_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"testing"
-)
-
-var contactInfoJSON = `{"name":"wordnik api team","url":"http://developer.wordnik.com","email":"some@mailayada.dkdkd"}`
-var contactInfoYAML = `name: wordnik api team
-url: http://developer.wordnik.com
-email: some@mailayada.dkdkd
-`
-var contactInfo = ContactInfo{
-	Name:  "wordnik api team",
-	URL:   "http://developer.wordnik.com",
-	Email: "some@mailayada.dkdkd",
-}
-
-func TestIntegrationContactInfo(t *testing.T) {
-	assertSerializeJSON(t, contactInfo, contactInfoJSON)
-	assertSerializeYAML(t, contactInfo, contactInfoYAML)
-	assertParsesJSON(t, contactInfoJSON, contactInfo)
-	assertParsesYAML(t, contactInfoYAML, contactInfo)
-}
diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go
new file mode 100644
index 0000000000..389c528ff6
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	// Debug is true when the SWAGGER_DEBUG env var is not empty.
+	// It enables a more verbose logging of this package.
+	Debug = os.Getenv("SWAGGER_DEBUG") != ""
+	// specLogger is a debug logger for this package
+	specLogger *log.Logger
+)
+
+func init() {
+	debugOptions()
+}
+
+func debugOptions() {
+	specLogger = log.New(os.Stdout, "spec:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...interface{}) {
+	// A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+	if Debug {
+		_, file1, pos1, _ := runtime.Caller(1)
+		specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+	}
+}
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
index 7af80691fb..1e7fc8c490 100644
--- a/vendor/github.com/go-openapi/spec/expander.go
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -17,103 +17,72 @@ package spec
 import (
 	"encoding/json"
 	"fmt"
-	"log"
-	"net/url"
-	"os"
-	"path/filepath"
-	"reflect"
 	"strings"
-	"sync"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-var (
-	// Debug enables logging when SWAGGER_DEBUG env var is not empty
-	Debug = os.Getenv("SWAGGER_DEBUG") != ""
 )
 
-// ExpandOptions provides options for expand.
+// ExpandOptions provides options for spec expansion
 type ExpandOptions struct {
-	RelativeBase    string
-	SkipSchemas     bool
-	ContinueOnError bool
-}
-
-// ResolutionCache a cache for resolving urls
-type ResolutionCache interface {
-	Get(string) (interface{}, bool)
-	Set(string, interface{})
-}
-
-type simpleCache struct {
-	lock  sync.Mutex
-	store map[string]interface{}
-}
-
-var resCache ResolutionCache
-
-func init() {
-	resCache = initResolutionCache()
-}
-
-func initResolutionCache() ResolutionCache {
-	return &simpleCache{store: map[string]interface{}{
-		"http://swagger.io/v2/schema.json":       MustLoadSwagger20Schema(),
-		"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
-	}}
-}
-
-func (s *simpleCache) Get(uri string) (interface{}, bool) {
-	debugLog("getting %q from resolution cache", uri)
-	s.lock.Lock()
-	v, ok := s.store[uri]
-	debugLog("got %q from resolution cache: %t", uri, ok)
-
-	s.lock.Unlock()
-	return v, ok
-}
-
-func (s *simpleCache) Set(uri string, data interface{}) {
-	s.lock.Lock()
-	s.store[uri] = data
-	s.lock.Unlock()
+	RelativeBase        string
+	SkipSchemas         bool
+	ContinueOnError     bool
+	AbsoluteCircularRef bool
 }
 
 // ResolveRefWithBase resolves a reference against a context root with preservation of base path
 func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
-	resolver, err := defaultSchemaLoader(root, nil, opts, nil)
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
 	if err != nil {
 		return nil, err
 	}
+	specBasePath := ""
+	if opts != nil && opts.RelativeBase != "" {
+		specBasePath, _ = absPath(opts.RelativeBase)
+	}
 
 	result := new(Schema)
-	if err := resolver.Resolve(ref, result); err != nil {
+	if err := resolver.Resolve(ref, result, specBasePath); err != nil {
 		return nil, err
 	}
 	return result, nil
 }
 
 // ResolveRef resolves a reference against a context root
+// ref is guaranteed to be in root (no need to go to external files)
+// ResolveRef is ONLY called from the code generation module
 func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
-	return ResolveRefWithBase(root, ref, nil)
+	res, _, err := ref.GetPointer().Get(root)
+	if err != nil {
+		panic(err)
+	}
+	switch sch := res.(type) {
+	case Schema:
+		return &sch, nil
+	case *Schema:
+		return sch, nil
+	case map[string]interface{}:
+		b, _ := json.Marshal(sch)
+		newSch := new(Schema)
+		_ = json.Unmarshal(b, newSch)
+		return newSch, nil
+	default:
+		return nil, fmt.Errorf("unknown type for the resolved reference")
+	}
 }
 
-// ResolveParameter resolves a paramter reference against a context root
+// ResolveParameter resolves a parameter reference against a context root
 func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
 	return ResolveParameterWithBase(root, ref, nil)
 }
 
-// ResolveParameterWithBase resolves a paramter reference against a context root and base path
+// ResolveParameterWithBase resolves a parameter reference against a context root and base path
 func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
-	resolver, err := defaultSchemaLoader(root, nil, opts, nil)
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
 	if err != nil {
 		return nil, err
 	}
 
 	result := new(Parameter)
-	if err := resolver.Resolve(&ref, result); err != nil {
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
 		return nil, err
 	}
 	return result, nil
@@ -126,27 +95,33 @@ func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
 // ResolveResponseWithBase resolves a response reference against a context root and base path
 func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
-	resolver, err := defaultSchemaLoader(root, nil, opts, nil)
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
 	if err != nil {
 		return nil, err
 	}
 
 	result := new(Response)
-	if err := resolver.Resolve(&ref, result); err != nil {
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
 		return nil, err
 	}
 	return result, nil
 }
 
-// ResolveItems resolves header and parameter items reference against a context root and base path
+// ResolveItems resolves parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref are forbidden in response headers.
 func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
-	resolver, err := defaultSchemaLoader(root, nil, opts, nil)
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
 	if err != nil {
 		return nil, err
 	}
-
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
 	result := new(Items)
-	if err := resolver.Resolve(&ref, result); err != nil {
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
 		return nil, err
 	}
 	return result, nil
@@ -154,393 +129,68 @@ func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error
 // ResolvePathItem resolves a path item against a context root and base path
 func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
-	resolver, err := defaultSchemaLoader(root, nil, opts, nil)
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
 	if err != nil {
 		return nil, err
 	}
-
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
 	result := new(PathItem)
-	if err := resolver.Resolve(&ref, result); err != nil {
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
 		return nil, err
 	}
 	return result, nil
 }
 
-type schemaLoader struct {
-	loadingRef  *Ref
-	startingRef *Ref
-	currentRef  *Ref
-	root        interface{}
-	options     *ExpandOptions
-	cache       ResolutionCache
-	loadDoc     func(string) (json.RawMessage, error)
-}
-
-var idPtr, _ = jsonpointer.New("/id")
-var refPtr, _ = jsonpointer.New("/$ref")
-
-// PathLoader function to use when loading remote refs
-var PathLoader func(string) (json.RawMessage, error)
-
-func init() {
-	PathLoader = func(path string) (json.RawMessage, error) {
-		data, err := swag.LoadFromFileOrHTTP(path)
-		if err != nil {
-			return nil, err
-		}
-		return json.RawMessage(data), nil
-	}
-}
-
-func defaultSchemaLoader(
-	root interface{},
-	ref *Ref,
-	expandOptions *ExpandOptions,
-	cache ResolutionCache) (*schemaLoader, error) {
-
-	if cache == nil {
-		cache = resCache
-	}
-	if expandOptions == nil {
-		expandOptions = &ExpandOptions{}
-	}
-
-	var ptr *jsonpointer.Pointer
-	if ref != nil {
-		ptr = ref.GetPointer()
-	}
-
-	currentRef := nextRef(root, ref, ptr)
-
-	return &schemaLoader{
-		loadingRef:  ref,
-		startingRef: ref,
-		currentRef:  currentRef,
-		root:        root,
-		options:     expandOptions,
-		cache:       cache,
-		loadDoc: func(path string) (json.RawMessage, error) {
-			debugLog("fetching document at %q", path)
-			return PathLoader(path)
-		},
-	}, nil
-}
-
-func idFromNode(node interface{}) (*Ref, error) {
-	if idValue, _, err := idPtr.Get(node); err == nil {
-		if refStr, ok := idValue.(string); ok && refStr != "" {
-			idRef, err := NewRef(refStr)
-			if err != nil {
-				return nil, err
-			}
-			return &idRef, nil
-		}
-	}
-	return nil, nil
-}
-
-func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
-	if startingRef == nil {
-		return nil
-	}
-
-	if ptr == nil {
-		return startingRef
-	}
-
-	ret := startingRef
-	var idRef *Ref
-	node := startingNode
-
-	for _, tok := range ptr.DecodedTokens() {
-		node, _, _ = jsonpointer.GetForToken(node, tok)
-		if node == nil {
-			break
-		}
-
-		idRef, _ = idFromNode(node)
-		if idRef != nil {
-			nw, err := ret.Inherits(*idRef)
-			if err != nil {
-				break
-			}
-			ret = nw
-		}
-
-		refRef, _, _ := refPtr.Get(node)
-		if refRef != nil {
-			var rf Ref
-			switch value := refRef.(type) {
-			case string:
-				rf, _ = NewRef(value)
-			}
-			nw, err := ret.Inherits(rf)
-			if err != nil {
-				break
-			}
-			nwURL := nw.GetURL()
-			if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
-				nwpt := filepath.ToSlash(nwURL.Path)
-				if filepath.IsAbs(nwpt) {
-					_, err := os.Stat(nwpt)
-					if err != nil {
-						nwURL.Path = filepath.Join(".", nwpt)
-					}
-				}
-			}
-
-			ret = nw
-		}
-
-	}
-
-	return ret
-}
-
-func debugLog(msg string, args ...interface{}) {
-	if Debug {
-		log.Printf(msg, args...)
-	}
-}
-
-func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
-	refURL := ref.GetURL()
-	debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String())
-	if strings.HasPrefix(refURL.String(), "#") {
-		return ref
-	}
-
-	if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") {
-		filePath := refURL.Path
-		debugLog("normalizing file path: %s", filePath)
-
-		if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 {
-			debugLog("joining %s with %s", relativeBase, filePath)
-			if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil {
-				if !fi.IsDir() {
-					relativeBase = filepath.Dir(filepath.FromSlash(relativeBase))
-				}
-			}
-			filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath))
-		}
-		if !filepath.IsAbs(filepath.FromSlash(filePath)) {
-			pwd, err := os.Getwd()
-			if err == nil {
-				debugLog("joining cwd %s with %s", pwd, filePath)
-				filePath = filepath.Join(pwd, filepath.FromSlash(filePath))
-			}
-		}
-
-		debugLog("cleaning %s", filePath)
-		filePath = filepath.Clean(filepath.FromSlash(filePath))
-		_, err := os.Stat(filepath.FromSlash(filePath))
-		if err == nil {
-			debugLog("rewriting url %s to scheme \"\" path %s", refURL.String(), filePath)
-			slp := filepath.FromSlash(filePath)
-			if filepath.IsAbs(slp) && filepath.Separator == '\\' && len(slp) > 1 && slp[1] == ':' && ('a' <= slp[0] && slp[0] <= 'z' || 'A' <= slp[0] && slp[0] <= 'Z') {
-				slp = slp[2:]
-			}
-			refURL.Scheme = ""
-			refURL.Path = filepath.ToSlash(slp)
-			debugLog("new url with joined filepath: %s", refURL.String())
-			*ref = MustCreateRef(refURL.String())
-		}
-	}
-
-	debugLog("refurl: %s", ref.GetURL().String())
-	return ref
-}
-
-func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
-	tgt := reflect.ValueOf(target)
-	if tgt.Kind() != reflect.Ptr {
-		return fmt.Errorf("resolve ref: target needs to be a pointer")
-	}
-
-	oldRef := currentRef
-	if currentRef != nil {
-		debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
-		nextRef := nextRef(node, ref, currentRef.GetPointer())
-		if nextRef == nil || nextRef.GetURL() == nil {
-			return nil
-		}
-		var err error
-		currentRef, err = currentRef.Inherits(*nextRef)
-		debugLog("resolved ref current %s", currentRef.String())
-		if err != nil {
-			return err
-		}
-	}
-
-	if currentRef == nil {
-		currentRef = ref
-	}
-
-	refURL := currentRef.GetURL()
-	if refURL == nil {
-		return nil
-	}
-	if currentRef.IsRoot() {
-		nv := reflect.ValueOf(node)
-		reflect.Indirect(tgt).Set(reflect.Indirect(nv))
-		return nil
-	}
-
-	if strings.HasPrefix(refURL.String(), "#") {
-		res, _, err := ref.GetPointer().Get(node)
-		if err != nil {
-			res, _, err = ref.GetPointer().Get(r.root)
-			if err != nil {
-				return err
-			}
-		}
-		rv := reflect.Indirect(reflect.ValueOf(res))
-		tgtType := reflect.Indirect(tgt).Type()
-		if rv.Type().AssignableTo(tgtType) {
-			reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res)))
-		} else {
-			if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil {
-				return err
-			}
-		}
-
-		return nil
-	}
-
-	relativeBase := ""
-	if r.options != nil && r.options.RelativeBase != "" {
-		relativeBase = r.options.RelativeBase
-	}
-	normalizeFileRef(currentRef, relativeBase)
-	debugLog("current ref normalized file: %s", currentRef.String())
-	normalizeFileRef(ref, relativeBase)
-	debugLog("ref normalized file: %s", currentRef.String())
-
-	data, _, _, err := r.load(currentRef.GetURL())
-	if err != nil {
-		return err
-	}
-
-	if ((oldRef == nil && currentRef != nil) ||
-		(oldRef != nil && currentRef == nil) ||
-		oldRef.String() != currentRef.String()) &&
-		((oldRef == nil && ref != nil) ||
-			(oldRef != nil && ref == nil) ||
-			(oldRef.String() != ref.String())) {
-
-		return r.resolveRef(currentRef, ref, data, target)
-	}
-
-	var res interface{}
-	if currentRef.String() != "" {
-		res, _, err = currentRef.GetPointer().Get(data)
-		if err != nil {
-			if strings.HasPrefix(ref.String(), "#") {
-				if r.loadingRef != nil {
-					rr, er := r.loadingRef.Inherits(*ref)
-					if er != nil {
-						return er
-					}
-					refURL = rr.GetURL()
-
-					data, _, _, err = r.load(refURL)
-					if err != nil {
-						return err
-					}
-				} else {
-					data = r.root
-				}
-			}
-
-			res, _, err = ref.GetPointer().Get(data)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		res = data
-	}
-
-	if err := swag.DynamicJSONToStruct(res, target); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
-	debugLog("loading schema from url: %s", refURL)
-	toFetch := *refURL
-	toFetch.Fragment = ""
-
-	data, fromCache := r.cache.Get(toFetch.String())
-	if !fromCache {
-		b, err := r.loadDoc(toFetch.String())
-		if err != nil {
-			return nil, url.URL{}, false, err
-		}
-
-		if err := json.Unmarshal(b, &data); err != nil {
-			return nil, url.URL{}, false, err
-		}
-		r.cache.Set(toFetch.String(), data)
-	}
-
-	return data, toFetch, fromCache, nil
-}
-
-func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
-	return r.resolveRef(r.currentRef, ref, r.root, target)
-}
-
-func (r *schemaLoader) reset() {
-	ref := r.startingRef
-
-	var ptr *jsonpointer.Pointer
-	if ref != nil {
-		ptr = ref.GetPointer()
-	}
-
-	r.currentRef = nextRef(r.root, ref, ptr)
-}
-
 // ExpandSpec expands the references in a swagger spec
 func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
-	resolver, err := defaultSchemaLoader(spec, nil, options, nil)
-	if shouldStopOnError(err, resolver.options) {
+	resolver, err := defaultSchemaLoader(spec, options, nil, nil)
+	// Just in case this ever returns an error.
+	if resolver.shouldStopOnError(err) {
 		return err
 	}
 
+	// getting the base path of the spec to adjust all subsequent reference resolutions
+	specBasePath := ""
+	if options != nil && options.RelativeBase != "" {
+		specBasePath, _ = absPath(options.RelativeBase)
+	}
+
 	if options == nil || !options.SkipSchemas {
 		for key, definition := range spec.Definitions {
 			var def *Schema
 			var err error
-			if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) {
+			if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) {
 				return err
 			}
-			resolver.reset()
-			spec.Definitions[key] = *def
+			if def != nil {
+				spec.Definitions[key] = *def
+			}
 		}
 	}
 
-	for key, parameter := range spec.Parameters {
-		if err := expandParameter(&parameter, resolver); shouldStopOnError(err, resolver.options) {
+	for key := range spec.Parameters {
+		parameter := spec.Parameters[key]
+		if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
 			return err
 		}
 		spec.Parameters[key] = parameter
 	}
 
-	for key, response := range spec.Responses {
-		if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
+	for key := range spec.Responses {
+		response := spec.Responses[key]
+		if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
 			return err
 		}
 		spec.Responses[key] = response
 	}
 
 	if spec.Paths != nil {
-		for key, path := range spec.Paths.Paths {
-			if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) {
+		for key := range spec.Paths.Paths {
+			path := spec.Paths.Paths[key]
+			if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) {
 				return err
 			}
 			spec.Paths.Paths[key] = path
@@ -550,81 +200,75 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
 	return nil
 }
 
-func shouldStopOnError(err error, opts *ExpandOptions) bool {
-	if err != nil && !opts.ContinueOnError {
-		return true
-	}
-
-	if err != nil {
-		log.Println(err)
+// baseForRoot loads the root document into the cache and produces a fake "root"
+// base path entry for further $ref resolution
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+	// cache the root document to resolve $ref's
+	const rootBase = "root"
+	if root != nil {
+		base, _ := absPath(rootBase)
+		normalizedBase := normalizeAbsPath(base)
+		debugLog("setting root doc in cache at: %s", normalizedBase)
+		if cache == nil {
+			cache = resCache
+		}
+		cache.Set(normalizedBase, root)
+		return rootBase
 	}
-
-	return false
+	return ""
 }
 
-// ExpandSchema expands the refs in the schema object
+// ExpandSchema expands the refs in the schema object with reference to the root object
+// go-openapi/validate uses this function
+// notice that it is impossible to reference a json schema in a file other than root
 func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
-	return ExpandSchemaWithBasePath(schema, root, cache, nil)
+	opts := &ExpandOptions{
+		// when a root is specified, cache the root as an in-memory document for $ref retrieval
+		RelativeBase:    baseForRoot(root, cache),
+		SkipSchemas:     false,
+		ContinueOnError: false,
+		// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
+		AbsoluteCircularRef: true,
+	}
+	return ExpandSchemaWithBasePath(schema, cache, opts)
 }
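After this refactor, callers drive expansion entirely through ExpandOptions; the old schemaLoader state (currentRef, reset) is gone. A minimal caller-side sketch of the new entry point, using only the exported names shown above (the document variable and the base path are illustrative assumptions, not part of this diff; the import path is the upstream go-openapi/spec package):

    package main

    import (
        "log"

        "github.com/go-openapi/spec"
    )

    func expandAll(doc *spec.Swagger) {
        opts := &spec.ExpandOptions{
            RelativeBase:    "./swagger.json", // base path used to resolve relative $ref's
            SkipSchemas:     false,            // also expand schemas under definitions
            ContinueOnError: false,            // stop at the first resolution error
        }
        if err := spec.ExpandSpec(doc, opts); err != nil {
            log.Fatalf("expanding spec: %v", err)
        }
    }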
 // ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
-func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error {
+func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error {
 	if schema == nil {
 		return nil
 	}
-	if root == nil {
-		root = schema
-	}
 
-	nrr, _ := NewRef(schema.ID)
-	var rrr *Ref
-	if nrr.String() != "" {
-		switch rt := root.(type) {
-		case *Schema:
-			rid, _ := NewRef(rt.ID)
-			rrr, _ = rid.Inherits(nrr)
-		case *Swagger:
-			rid, _ := NewRef(rt.ID)
-			rrr, _ = rid.Inherits(nrr)
-		}
+	var basePath string
+	if opts.RelativeBase != "" {
+		basePath, _ = absPath(opts.RelativeBase)
 	}
 
-	resolver, err := defaultSchemaLoader(root, rrr, opts, cache)
+	resolver, err := defaultSchemaLoader(nil, opts, cache, nil)
 	if err != nil {
 		return err
 	}
 
 	refs := []string{""}
-	if rrr != nil {
-		refs[0] = rrr.String()
-	}
 	var s *Schema
-	if s, err = expandSchema(*schema, refs, resolver); err != nil {
+	if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil {
 		return err
 	}
 	*schema = *s
 	return nil
 }
 
-func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
+func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
 	if target.Items != nil {
 		if target.Items.Schema != nil {
-			t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
+			t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath)
 			if err != nil {
-				if target.Items.Schema.ID == "" {
-					target.Items.Schema.ID = target.ID
-					if err != nil {
-						t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
-						if err != nil {
-							return nil, err
-						}
-					}
-				}
+				return nil, err
 			}
 			*target.Items.Schema = *t
 		}
 		for i := range target.Items.Schemas {
-			t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver)
+			t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath)
 			if err != nil {
 				return nil, err
 			}
@@ -634,74 +278,96 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
 	return &target, nil
 }
 
-func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
+func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
 	if target.Ref.String() == "" && target.Ref.IsRoot() {
-		debugLog("skipping expand schema for no ref and root: %v", resolver.root)
+		// normalizing is important
+		newRef := normalizeFileRef(&target.Ref, basePath)
+		target.Ref = *newRef
+		return &target, nil
+
+	}
 
-		return resolver.root.(*Schema), nil
+	// change the base path of resolution when an ID is encountered
+	// otherwise the basePath should inherit the parent's
+	// important: ID can be relative path
+	if target.ID != "" {
+		debugLog("schema has ID: %s", target.ID)
+		// handling the case when id is a folder
+		// remember that basePath has to be a file
+		refPath := target.ID
+		if strings.HasSuffix(target.ID, "/") {
+			// path.Clean here would not work correctly if basepath is http
+			refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json")
+		}
+		basePath = normalizePaths(refPath, basePath)
 	}
 
 	var t *Schema
-	var basePath string
-	b, _ := json.Marshal(target)
-	debugLog("Target is: %s", string(b))
-	for target.Ref.String() != "" {
-		if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
+	// if Ref is found, everything else doesn't matter
+	// Ref also changes the resolution scope of children expandSchema
+	if target.Ref.String() != "" {
+		// here the resolution scope is changed because a $ref was encountered
+		normalizedRef := normalizeFileRef(&target.Ref, basePath)
+		normalizedBasePath := normalizedRef.RemoteURI()
+
+		if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
+			// this means there is a cycle in the recursion tree: return the Ref
+			// - circular refs cannot be expanded. We leave them as ref.
+			// - denormalization means that a new local file ref is set relative to the original basePath
+			debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+				basePath, normalizedBasePath, normalizedRef.String())
+			if !resolver.options.AbsoluteCircularRef {
+				target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath)
+			} else {
+				target.Ref = *normalizedRef
+			}
 			return &target, nil
 		}
 
-		basePath = target.Ref.RemoteURI()
-		debugLog("\n\n\n\n\nbasePath: %s", basePath)
-		b, _ := json.Marshal(target)
-		debugLog("calling Resolve with target: %s", string(b))
-		if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) {
-			return &target, err
-		}
-		if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
-			debugLog("ref already exists in parent")
-			return &target, nil
+		debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target)
+		if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) {
+			return nil, err
 		}
-		parentRefs = append(parentRefs, target.Ref.String())
+
 		if t != nil {
-			target = *t
+			parentRefs = append(parentRefs, normalizedRef.String())
+			var err error
+			transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref)
+			if transitiveResolver.shouldStopOnError(err) {
+				return nil, err
+			}
+
+			basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+			return expandSchema(*t, parentRefs, transitiveResolver, basePath)
 		}
 	}
 
-	if target.Ref.String() == "" {
-		b, _ := json.Marshal(target)
-		debugLog("before: %s", string(b))
-		modifyRefs(&target, basePath)
-		b, _ = json.Marshal(target)
-		debugLog("after: %s", string(b))
-	}
-	t, err := expandItems(target, parentRefs, resolver)
-	if shouldStopOnError(err, resolver.options) {
+
+	t, err := expandItems(target, parentRefs, resolver, basePath)
+	if resolver.shouldStopOnError(err) {
 		return &target, err
 	}
 	if t != nil {
 		target = *t
 	}
-	resolver.reset()
-
 	for i := range target.AllOf {
-		t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
-		if t != nil {
-			target.AllOf[i] = *t
-		}
+		target.AllOf[i] = *t
 	}
 	for i := range target.AnyOf {
-		t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		target.AnyOf[i] = *t
 	}
 	for i := range target.OneOf {
-		t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -709,8 +375,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 		}
 	}
 	if target.Not != nil {
-		t, err := expandSchema(*target.Not, parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(*target.Not, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -718,8 +384,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 		}
 	}
 	for k := range target.Properties {
-		t, err := expandSchema(target.Properties[k], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -727,8 +393,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 		}
 	}
 	if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
-		t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -736,8 +402,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 		}
 	}
 	for k := range target.PatternProperties {
-		t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -746,8 +412,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 	}
 	for k := range target.Dependencies {
 		if target.Dependencies[k].Schema != nil {
-			t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
-			if shouldStopOnError(err, resolver.options) {
+			t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath)
+			if resolver.shouldStopOnError(err) {
 				return &target, err
 			}
 			if t != nil {
@@ -756,8 +422,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 		}
 	}
 	if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
-		t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -765,8 +431,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 	}
 	for k := range target.Definitions {
-		t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
-		if shouldStopOnError(err, resolver.options) {
+		t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
 			return &target, err
 		}
 		if t != nil {
@@ -776,55 +442,54 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
 	return &target, nil
 }
 
-func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
 	if pathItem == nil {
 		return nil
 	}
 
+	parentRefs := []string{}
+	if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
 	if pathItem.Ref.String() != "" {
-		if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
+		var err error
+		resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref)
+		if resolver.shouldStopOnError(err) {
 			return err
 		}
-		resolver.reset()
-		pathItem.Ref = Ref{}
 	}
+	pathItem.Ref = Ref{}
 
 	for idx := range pathItem.Parameters {
-		if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) {
+		if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) {
 			return err
 		}
 	}
-	if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) {
-		return err
-	}
-	if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) {
-		return err
-	}
-	if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) {
-		return err
-	}
-	if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) {
-		return err
-	}
-	if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) {
-		return err
-	}
-	if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) {
-		return err
+
+	ops := []*Operation{
+		pathItem.Get,
+		pathItem.Head,
+		pathItem.Options,
+		pathItem.Put,
+		pathItem.Post,
+		pathItem.Patch,
+		pathItem.Delete,
 	}
-	if err := expandOperation(pathItem.Delete, resolver); shouldStopOnError(err, resolver.options) {
-		return err
+	for _, op := range ops {
+		if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
 	}
 	return nil
 }
 
-func expandOperation(op *Operation, resolver *schemaLoader) error {
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
 	if op == nil {
 		return nil
 	}
 
-	for i, param := range op.Parameters {
-		if err := expandParameter(&param, resolver); shouldStopOnError(err, resolver.options) {
+	for i := range op.Parameters {
+		param := op.Parameters[i]
+		if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) {
 			return err
 		}
 		op.Parameters[i] = param
@@ -832,11 +497,12 @@ func expandOperation(op *Operation, resolver *schemaLoader) error {
 
 	if op.Responses != nil {
 		responses := op.Responses
-		if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) {
+		if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
 			return err
 		}
-		for code, response := range responses.StatusCodeResponses {
-			if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
+		for code := range responses.StatusCodeResponses {
+			response := responses.StatusCodeResponses[code]
+			if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
 				return err
 			}
 			responses.StatusCodeResponses[code] = response
@@ -845,64 +511,140 @@ func expandOperation(op *Operation, resolver *schemaLoader) error {
 	return nil
 }
 
-func expandResponse(response *Response, resolver *schemaLoader) error {
-	if response == nil {
-		return nil
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error {
+	opts := &ExpandOptions{
+		RelativeBase:    baseForRoot(root, cache),
+		SkipSchemas:     false,
+		ContinueOnError: false,
+		// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
+		AbsoluteCircularRef: true,
+	}
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+
if err != nil { + return err } - var parentRefs []string + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} - if response.Ref.String() != "" { - parentRefs = append(parentRefs, response.Ref.String()) - if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) { - return err - } - resolver.reset() - response.Ref = Ref{} +// ExpandResponse expands a response based on a basepath +// This is the exported version of expandResponse +// all refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err } - if !resolver.options.SkipSchemas && response.Schema != nil { - parentRefs = append(parentRefs, response.Schema.Ref.String()) - debugLog("response ref: %s", response.Schema.Ref) - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) { - return err + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ref *Ref + var sch *Schema + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil } - s, err := expandSchema(*response.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return err + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil } - resolver.reset() - *response.Schema = *s + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("expand: unsupported type %T. 
Input should be of type *Parameter or *Response", input) } - return nil + return ref, sch, nil } -func expandParameter(parameter *Parameter, resolver *schemaLoader) error { - if parameter == nil { +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + if ref == nil { return nil } - - var parentRefs []string - - if parameter.Ref.String() != "" { - parentRefs = append(parentRefs, parameter.Ref.String()) - if err := resolver.Resolve(¶meter.Ref, parameter); shouldStopOnError(err, resolver.options) { + parentRefs := []string{} + if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) + if transitiveResolver.shouldStopOnError(err) { return err } - resolver.reset() - parameter.Ref = Ref{} + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver } - if !resolver.options.SkipSchemas && parameter.Schema != nil { - parentRefs = append(parentRefs, parameter.Schema.Ref.String()) - if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); shouldStopOnError(err, resolver.options) { - return err + + if sch != nil && sch.Ref.String() != "" { + // schema expanded to a $ref in another root + var ern error + sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) + if ern != nil { + return ern } - s, err := expandSchema(*parameter.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { + } + if ref != nil { + *ref = Ref{} + } + + if !resolver.options.SkipSchemas && sch != nil { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { return err } - resolver.reset() - *parameter.Schema = *s + *sch = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/expander_test.go b/vendor/github.com/go-openapi/spec/expander_test.go deleted file mode 100644 index 09c8573cfa..0000000000 --- a/vendor/github.com/go-openapi/spec/expander_test.go +++ /dev/null @@ -1,1162 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" - "github.com/stretchr/testify/assert" -) - -func jsonDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -func TestExpandsKnownRef(t *testing.T) { - schema := RefProperty("http://json-schema.org/draft-04/schema#") - if assert.NoError(t, ExpandSchema(schema, nil, nil)) { - assert.Equal(t, "Core schema meta-schema", schema.Description) - } -} - -func TestExpandResponseSchema(t *testing.T) { - fp := "./fixtures/local_expansion/spec.json" - b, err := jsonDoc(fp) - if assert.NoError(t, err) { - var spec Swagger - if err := json.Unmarshal(b, &spec); assert.NoError(t, err) { - err := ExpandSpec(&spec, &ExpandOptions{RelativeBase: fp}) - if assert.NoError(t, err) { - sch := spec.Paths.Paths["/item"].Get.Responses.StatusCodeResponses[200].Schema - if assert.NotNil(t, sch) { - assert.Empty(t, sch.Ref.String()) - assert.Contains(t, sch.Type, "object") - assert.Len(t, sch.Properties, 2) - } - } - } - } -} - -func TestSpecExpansion(t *testing.T) { - spec := new(Swagger) - // resolver, err := defaultSchemaLoader(spec, nil, nil) - // assert.NoError(t, err) - - err := ExpandSpec(spec, nil) - assert.NoError(t, err) - - specDoc, err := jsonDoc("fixtures/expansion/all-the-things.json") - assert.NoError(t, err) - - spec = new(Swagger) - err = json.Unmarshal(specDoc, spec) - assert.NoError(t, err) - - pet := spec.Definitions["pet"] - errorModel := spec.Definitions["errorModel"] - petResponse := spec.Responses["petResponse"] - petResponse.Schema = &pet - stringResponse := spec.Responses["stringResponse"] - tagParam := spec.Parameters["tag"] - idParam := spec.Parameters["idParam"] - - err = ExpandSpec(spec, nil) - assert.NoError(t, err) - - assert.Equal(t, tagParam, spec.Parameters["query"]) - assert.Equal(t, petResponse, spec.Responses["petResponse"]) - assert.Equal(t, petResponse, spec.Responses["anotherPet"]) - assert.Equal(t, pet, *spec.Responses["petResponse"].Schema) - assert.Equal(t, stringResponse, *spec.Paths.Paths["/"].Get.Responses.Default) - assert.Equal(t, petResponse, spec.Paths.Paths["/"].Get.Responses.StatusCodeResponses[200]) - assert.Equal(t, pet, *spec.Paths.Paths["/pets"].Get.Responses.StatusCodeResponses[200].Schema.Items.Schema) - assert.Equal(t, errorModel, *spec.Paths.Paths["/pets"].Get.Responses.Default.Schema) - assert.Equal(t, pet, spec.Definitions["petInput"].AllOf[0]) - assert.Equal(t, spec.Definitions["petInput"], *spec.Paths.Paths["/pets"].Post.Parameters[0].Schema) - assert.Equal(t, petResponse, spec.Paths.Paths["/pets"].Post.Responses.StatusCodeResponses[200]) - assert.Equal(t, errorModel, *spec.Paths.Paths["/pets"].Post.Responses.Default.Schema) - pi := spec.Paths.Paths["/pets/{id}"] - assert.Equal(t, idParam, pi.Get.Parameters[0]) - assert.Equal(t, petResponse, pi.Get.Responses.StatusCodeResponses[200]) - assert.Equal(t, errorModel, *pi.Get.Responses.Default.Schema) - assert.Equal(t, idParam, pi.Delete.Parameters[0]) - assert.Equal(t, errorModel, *pi.Delete.Responses.Default.Schema) -} - -func TestResponseExpansion(t *testing.T) { - specDoc, err := jsonDoc("fixtures/expansion/all-the-things.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(specDoc, spec) - assert.NoError(t, err) - - resolver, err := defaultSchemaLoader(spec, nil, 
nil, nil) - assert.NoError(t, err) - - resp := spec.Responses["anotherPet"] - expected := spec.Responses["petResponse"] - - err = expandResponse(&resp, resolver) - assert.NoError(t, err) - assert.Equal(t, expected, resp) - - resp2 := spec.Paths.Paths["/"].Get.Responses.Default - expected = spec.Responses["stringResponse"] - - err = expandResponse(resp2, resolver) - assert.NoError(t, err) - assert.Equal(t, expected, *resp2) - - resp = spec.Paths.Paths["/"].Get.Responses.StatusCodeResponses[200] - expected = spec.Responses["petResponse"] - - err = expandResponse(&resp, resolver) - assert.NoError(t, err) - // assert.Equal(t, expected, resp) -} - -func TestIssue3(t *testing.T) { - spec := new(Swagger) - specDoc, err := jsonDoc("fixtures/expansion/overflow.json") - assert.NoError(t, err) - - err = json.Unmarshal(specDoc, spec) - assert.NoError(t, err) - - assert.NotPanics(t, func() { - err = ExpandSpec(spec, nil) - assert.NoError(t, err) - }, "Calling expand spec with circular refs, should not panic!") -} - -func TestParameterExpansion(t *testing.T) { - paramDoc, err := jsonDoc("fixtures/expansion/params.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(paramDoc, spec) - assert.NoError(t, err) - - resolver, err := defaultSchemaLoader(spec, nil, nil, nil) - assert.NoError(t, err) - - param := spec.Parameters["query"] - expected := spec.Parameters["tag"] - - err = expandParameter(¶m, resolver) - assert.NoError(t, err) - assert.Equal(t, expected, param) - - param = spec.Paths.Paths["/cars/{id}"].Parameters[0] - expected = spec.Parameters["id"] - - err = expandParameter(¶m, resolver) - assert.NoError(t, err) - assert.Equal(t, expected, param) -} - -func TestCircularRefsExpansion(t *testing.T) { - carsDoc, err := jsonDoc("fixtures/expansion/circularRefs.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(carsDoc, spec) - assert.NoError(t, err) - - resolver, err := defaultSchemaLoader(spec, nil, nil, nil) - assert.NoError(t, err) - schema := spec.Definitions["car"] - - assert.NotPanics(t, func() { - _, err = expandSchema(schema, []string{"#/definitions/car"}, resolver) - assert.NoError(t, err) - }, "Calling expand schema with circular refs, should not panic!") -} - -func TestContinueOnErrorExpansion(t *testing.T) { - missingRefDoc, err := jsonDoc("fixtures/expansion/missingRef.json") - assert.NoError(t, err) - - testCase := struct { - Input *Swagger `json:"input"` - Expected *Swagger `json:"expected"` - }{} - err = json.Unmarshal(missingRefDoc, &testCase) - assert.NoError(t, err) - - opts := &ExpandOptions{ - ContinueOnError: true, - } - err = ExpandSpec(testCase.Input, opts) - assert.NoError(t, err) - assert.Equal(t, testCase.Input, testCase.Expected, "Should continue expanding spec when a definition can't be found.") - - doc, err := jsonDoc("fixtures/expansion/missingItemRef.json") - spec := new(Swagger) - err = json.Unmarshal(doc, spec) - assert.NoError(t, err) - - assert.NotPanics(t, func() { - err = ExpandSpec(spec, opts) - assert.NoError(t, err) - }, "Array of missing refs should not cause a panic, and continue to expand spec.") -} - -func TestIssue415(t *testing.T) { - doc, err := jsonDoc("fixtures/expansion/clickmeter.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(doc, spec) - assert.NoError(t, err) - - assert.NotPanics(t, func() { - err = ExpandSpec(spec, nil) - assert.NoError(t, err) - }, "Calling expand spec with response schemas that have circular refs, should not panic!") -} - -func 
TestCircularSpecExpansion(t *testing.T) { - doc, err := jsonDoc("fixtures/expansion/circularSpec.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(doc, spec) - assert.NoError(t, err) - - assert.NotPanics(t, func() { - err = ExpandSpec(spec, nil) - assert.NoError(t, err) - }, "Calling expand spec with circular refs, should not panic!") -} - -func TestItemsExpansion(t *testing.T) { - carsDoc, err := jsonDoc("fixtures/expansion/schemas2.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(carsDoc, spec) - assert.NoError(t, err) - - resolver, err := defaultSchemaLoader(spec, nil, nil, nil) - assert.NoError(t, err) - - schema := spec.Definitions["car"] - oldBrand := schema.Properties["brand"] - assert.NotEmpty(t, oldBrand.Items.Schema.Ref.String()) - assert.NotEqual(t, spec.Definitions["brand"], oldBrand) - - _, err = expandSchema(schema, []string{"#/definitions/car"}, resolver) - assert.NoError(t, err) - - newBrand := schema.Properties["brand"] - assert.Empty(t, newBrand.Items.Schema.Ref.String()) - assert.Equal(t, spec.Definitions["brand"], *newBrand.Items.Schema) - - schema = spec.Definitions["truck"] - assert.NotEmpty(t, schema.Items.Schema.Ref.String()) - - s, err := expandSchema(schema, []string{"#/definitions/truck"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Items.Schema.Ref.String()) - assert.Equal(t, spec.Definitions["car"], *schema.Items.Schema) - - sch := new(Schema) - _, err = expandSchema(*sch, []string{""}, resolver) - assert.NoError(t, err) - - schema = spec.Definitions["batch"] - s, err = expandSchema(schema, []string{"#/definitions/batch"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Items.Schema.Items.Schema.Ref.String()) - assert.Equal(t, *schema.Items.Schema.Items.Schema, spec.Definitions["brand"]) - - schema = spec.Definitions["batch2"] - s, err = expandSchema(schema, []string{"#/definitions/batch2"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Items.Schemas[0].Items.Schema.Ref.String()) - assert.Empty(t, schema.Items.Schemas[1].Items.Schema.Ref.String()) - assert.Equal(t, *schema.Items.Schemas[0].Items.Schema, spec.Definitions["brand"]) - assert.Equal(t, *schema.Items.Schemas[1].Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["allofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/allofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AllOf[0].Items.Schema.Ref.String()) - assert.Empty(t, schema.AllOf[1].Items.Schema.Ref.String()) - assert.Equal(t, *schema.AllOf[0].Items.Schema, spec.Definitions["brand"]) - assert.Equal(t, *schema.AllOf[1].Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["anyofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/anyofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AnyOf[0].Items.Schema.Ref.String()) - assert.Empty(t, schema.AnyOf[1].Items.Schema.Ref.String()) - assert.Equal(t, *schema.AnyOf[0].Items.Schema, spec.Definitions["brand"]) - assert.Equal(t, *schema.AnyOf[1].Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["oneofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/oneofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.OneOf[0].Items.Schema.Ref.String()) - assert.Empty(t, schema.OneOf[1].Items.Schema.Ref.String()) - assert.Equal(t, *schema.OneOf[0].Items.Schema, spec.Definitions["brand"]) - 
assert.Equal(t, *schema.OneOf[1].Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["notSomething"] - s, err = expandSchema(schema, []string{"#/definitions/notSomething"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Not.Items.Schema.Ref.String()) - assert.Equal(t, *schema.Not.Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["withAdditional"] - s, err = expandSchema(schema, []string{"#/definitions/withAdditional"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AdditionalProperties.Schema.Items.Schema.Ref.String()) - assert.Equal(t, *schema.AdditionalProperties.Schema.Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["withAdditionalItems"] - s, err = expandSchema(schema, []string{"#/definitions/withAdditionalItems"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AdditionalItems.Schema.Items.Schema.Ref.String()) - assert.Equal(t, *schema.AdditionalItems.Schema.Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["withPattern"] - s, err = expandSchema(schema, []string{"#/definitions/withPattern"}, resolver) - schema = *s - assert.NoError(t, err) - prop := schema.PatternProperties["^x-ab"] - assert.Empty(t, prop.Items.Schema.Ref.String()) - assert.Equal(t, *prop.Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["deps"] - s, err = expandSchema(schema, []string{"#/definitions/deps"}, resolver) - schema = *s - assert.NoError(t, err) - prop2 := schema.Dependencies["something"] - assert.Empty(t, prop2.Schema.Items.Schema.Ref.String()) - assert.Equal(t, *prop2.Schema.Items.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["defined"] - s, err = expandSchema(schema, []string{"#/definitions/defined"}, resolver) - schema = *s - assert.NoError(t, err) - prop = schema.Definitions["something"] - assert.Empty(t, prop.Items.Schema.Ref.String()) - assert.Equal(t, *prop.Items.Schema, spec.Definitions["tag"]) -} - -func TestSchemaExpansion(t *testing.T) { - carsDoc, err := jsonDoc("fixtures/expansion/schemas1.json") - assert.NoError(t, err) - - spec := new(Swagger) - err = json.Unmarshal(carsDoc, spec) - assert.NoError(t, err) - - resolver, err := defaultSchemaLoader(spec, nil, nil, nil) - assert.NoError(t, err) - - schema := spec.Definitions["car"] - oldBrand := schema.Properties["brand"] - assert.NotEmpty(t, oldBrand.Ref.String()) - assert.NotEqual(t, spec.Definitions["brand"], oldBrand) - - s, err := expandSchema(schema, []string{"#/definitions/car"}, resolver) - schema = *s - assert.NoError(t, err) - - newBrand := schema.Properties["brand"] - assert.Empty(t, newBrand.Ref.String()) - assert.Equal(t, spec.Definitions["brand"], newBrand) - - schema = spec.Definitions["truck"] - assert.NotEmpty(t, schema.Ref.String()) - - s, err = expandSchema(schema, []string{"#/definitions/truck"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Ref.String()) - assert.Equal(t, spec.Definitions["car"], schema) - - sch := new(Schema) - _, err = expandSchema(*sch, []string{""}, resolver) - assert.NoError(t, err) - - schema = spec.Definitions["batch"] - s, err = expandSchema(schema, []string{"#/definitions/batch"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Items.Schema.Ref.String()) - assert.Equal(t, *schema.Items.Schema, spec.Definitions["brand"]) - - schema = spec.Definitions["batch2"] - s, err = expandSchema(schema, []string{"#/definitions/batch2"}, resolver) - schema = *s - 
assert.NoError(t, err) - assert.Empty(t, schema.Items.Schemas[0].Ref.String()) - assert.Empty(t, schema.Items.Schemas[1].Ref.String()) - assert.Equal(t, schema.Items.Schemas[0], spec.Definitions["brand"]) - assert.Equal(t, schema.Items.Schemas[1], spec.Definitions["tag"]) - - schema = spec.Definitions["allofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/allofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AllOf[0].Ref.String()) - assert.Empty(t, schema.AllOf[1].Ref.String()) - assert.Equal(t, schema.AllOf[0], spec.Definitions["brand"]) - assert.Equal(t, schema.AllOf[1], spec.Definitions["tag"]) - - schema = spec.Definitions["anyofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/anyofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AnyOf[0].Ref.String()) - assert.Empty(t, schema.AnyOf[1].Ref.String()) - assert.Equal(t, schema.AnyOf[0], spec.Definitions["brand"]) - assert.Equal(t, schema.AnyOf[1], spec.Definitions["tag"]) - - schema = spec.Definitions["oneofBoth"] - s, err = expandSchema(schema, []string{"#/definitions/oneofBoth"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.OneOf[0].Ref.String()) - assert.Empty(t, schema.OneOf[1].Ref.String()) - assert.Equal(t, schema.OneOf[0], spec.Definitions["brand"]) - assert.Equal(t, schema.OneOf[1], spec.Definitions["tag"]) - - schema = spec.Definitions["notSomething"] - s, err = expandSchema(schema, []string{"#/definitions/notSomething"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.Not.Ref.String()) - assert.Equal(t, *schema.Not, spec.Definitions["tag"]) - - schema = spec.Definitions["withAdditional"] - s, err = expandSchema(schema, []string{"#/definitions/withAdditional"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AdditionalProperties.Schema.Ref.String()) - assert.Equal(t, *schema.AdditionalProperties.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["withAdditionalItems"] - s, err = expandSchema(schema, []string{"#/definitions/withAdditionalItems"}, resolver) - schema = *s - assert.NoError(t, err) - assert.Empty(t, schema.AdditionalItems.Schema.Ref.String()) - assert.Equal(t, *schema.AdditionalItems.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["withPattern"] - s, err = expandSchema(schema, []string{"#/definitions/withPattern"}, resolver) - schema = *s - assert.NoError(t, err) - prop := schema.PatternProperties["^x-ab"] - assert.Empty(t, prop.Ref.String()) - assert.Equal(t, prop, spec.Definitions["tag"]) - - schema = spec.Definitions["deps"] - s, err = expandSchema(schema, []string{"#/definitions/deps"}, resolver) - schema = *s - assert.NoError(t, err) - prop2 := schema.Dependencies["something"] - assert.Empty(t, prop2.Schema.Ref.String()) - assert.Equal(t, *prop2.Schema, spec.Definitions["tag"]) - - schema = spec.Definitions["defined"] - s, err = expandSchema(schema, []string{"#/definitions/defined"}, resolver) - schema = *s - assert.NoError(t, err) - prop = schema.Definitions["something"] - assert.Empty(t, prop.Ref.String()) - assert.Equal(t, prop, spec.Definitions["tag"]) - -} - -func TestDefaultResolutionCache(t *testing.T) { - - cache := initResolutionCache() - - sch, ok := cache.Get("not there") - assert.False(t, ok) - assert.Nil(t, sch) - - sch, ok = cache.Get("http://swagger.io/v2/schema.json") - assert.True(t, ok) - assert.Equal(t, swaggerSchema, sch) - - sch, ok = cache.Get("http://json-schema.org/draft-04/schema") - 
assert.True(t, ok) - assert.Equal(t, jsonSchema, sch) - - cache.Set("something", "here") - sch, ok = cache.Get("something") - assert.True(t, ok) - assert.Equal(t, "here", sch) -} - -func resolutionContextServer() *httptest.Server { - var servedAt string - server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - // fmt.Println("got a request for", req.URL.String()) - if req.URL.Path == "/resolution.json" { - - b, _ := ioutil.ReadFile("fixtures/specs/resolution.json") - var ctnt map[string]interface{} - json.Unmarshal(b, &ctnt) - ctnt["id"] = servedAt - - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(200) - bb, _ := json.Marshal(ctnt) - rw.Write(bb) - return - } - if req.URL.Path == "/resolution2.json" { - b, _ := ioutil.ReadFile("fixtures/specs/resolution2.json") - var ctnt map[string]interface{} - json.Unmarshal(b, &ctnt) - ctnt["id"] = servedAt - - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(200) - bb, _ := json.Marshal(ctnt) - rw.Write(bb) - return - } - - if req.URL.Path == "/boolProp.json" { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(200) - b, _ := json.Marshal(map[string]interface{}{ - "type": "boolean", - }) - _, _ = rw.Write(b) - return - } - - if req.URL.Path == "/deeper/stringProp.json" { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(200) - b, _ := json.Marshal(map[string]interface{}{ - "type": "string", - }) - rw.Write(b) - return - } - - if req.URL.Path == "/deeper/arrayProp.json" { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(200) - b, _ := json.Marshal(map[string]interface{}{ - "type": "array", - "items": map[string]interface{}{ - "type": "file", - }, - }) - rw.Write(b) - return - } - - rw.WriteHeader(http.StatusNotFound) - })) - servedAt = server.URL - return server -} - -func TestResolveRemoteRef_RootSame(t *testing.T) { - specs := "fixtures/specs" - fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var result_0 Swagger - ref_0, _ := NewRef(server.URL + "/refed.json#") - resolver_0, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver_0.Resolve(&ref_0, &result_0)) { - assertSpecs(t, result_0, *rootDoc) - } - - var result_1 Swagger - ref_1, _ := NewRef("./refed.json") - resolver_1, _ := defaultSchemaLoader(rootDoc, nil, &ExpandOptions{ - RelativeBase: (specs), - }, nil) - if assert.NoError(t, resolver_1.Resolve(&ref_1, &result_1)) { - assertSpecs(t, result_1, *rootDoc) - } - } -} - -func TestResolveRemoteRef_FromFragment(t *testing.T) { - specs := "fixtures/specs" - fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Schema - ref, err := NewRef(server.URL + "/refed.json#/definitions/pet") - if assert.NoError(t, err) { - resolver := &schemaLoader{root: rootDoc, cache: initResolutionCache(), loadDoc: jsonDoc} - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, []string{"id", "name"}, tgt.Required) - } - } - } -} - -func TestResolveRemoteRef_FromInvalidFragment(t *testing.T) { - specs := "fixtures/specs" - 
fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Schema - ref, err := NewRef(server.URL + "/refed.json#/definitions/NotThere") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - assert.Error(t, resolver.Resolve(&ref, &tgt)) - } - } -} - -func TestResolveRemoteRef_WithResolutionContext(t *testing.T) { - server := resolutionContextServer() - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Schema - ref, err := NewRef(server.URL + "/resolution.json#/definitions/bool") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, StringOrArray([]string{"boolean"}), tgt.Type) - } - } - } -} - -func TestResolveRemoteRef_WithNestedResolutionContext(t *testing.T) { - server := resolutionContextServer() - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Schema - ref, err := NewRef(server.URL + "/resolution.json#/items/items") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, StringOrArray([]string{"string"}), tgt.Type) - } - } - } -} - -func TestResolveRemoteRef_WithNestedResolutionContextWithFragment(t *testing.T) { - server := resolutionContextServer() - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Schema - ref, err := NewRef(server.URL + "/resolution2.json#/items/items") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, StringOrArray([]string{"file"}), tgt.Type) - } - } - } -} - -func TestResolveRemoteRef_ToParameter(t *testing.T) { - specs := "fixtures/specs" - fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Parameter - ref, err := NewRef(server.URL + "/refed.json#/parameters/idParam") - if assert.NoError(t, err) { - - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, "id", tgt.Name) - assert.Equal(t, "path", tgt.In) - assert.Equal(t, "ID of pet to fetch", tgt.Description) - assert.True(t, tgt.Required) - assert.Equal(t, "integer", tgt.Type) - assert.Equal(t, "int64", tgt.Format) - } - } - } -} - -func TestResolveRemoteRef_ToPathItem(t *testing.T) { - specs := "fixtures/specs" - fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) 
{ - var tgt PathItem - ref, err := NewRef(server.URL + "/refed.json#/paths/" + jsonpointer.Escape("/pets/{id}")) - if assert.NoError(t, err) { - - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, rootDoc.Paths.Paths["/pets/{id}"].Get, tgt.Get) - } - } - } -} - -func TestResolveRemoteRef_ToResponse(t *testing.T) { - specs := "fixtures/specs" - fileserver := http.FileServer(http.Dir(specs)) - server := httptest.NewServer(fileserver) - defer server.Close() - - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Response - ref, err := NewRef(server.URL + "/refed.json#/responses/petResponse") - if assert.NoError(t, err) { - - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, rootDoc.Responses["petResponse"], tgt) - } - } - } -} - -func TestResolveLocalRef_SameRoot(t *testing.T) { - rootDoc := new(Swagger) - json.Unmarshal(PetStoreJSONMessage, rootDoc) - - result := new(Swagger) - ref, _ := NewRef("#") - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - err := resolver.Resolve(&ref, result) - if assert.NoError(t, err) { - assert.Equal(t, rootDoc, result) - } -} - -func TestResolveLocalRef_FromFragment(t *testing.T) { - rootDoc := new(Swagger) - json.Unmarshal(PetStoreJSONMessage, rootDoc) - - var tgt Schema - ref, err := NewRef("#/definitions/Category") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - err := resolver.Resolve(&ref, &tgt) - if assert.NoError(t, err) { - assert.Equal(t, "Category", tgt.ID) - } - } -} - -func TestResolveLocalRef_FromInvalidFragment(t *testing.T) { - rootDoc := new(Swagger) - json.Unmarshal(PetStoreJSONMessage, rootDoc) - - var tgt Schema - ref, err := NewRef("#/definitions/NotThere") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - err := resolver.Resolve(&ref, &tgt) - assert.Error(t, err) - } -} - -func TestResolveLocalRef_Parameter(t *testing.T) { - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt Parameter - ref, err := NewRef("#/parameters/idParam") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, "id", tgt.Name) - assert.Equal(t, "path", tgt.In) - assert.Equal(t, "ID of pet to fetch", tgt.Description) - assert.True(t, tgt.Required) - assert.Equal(t, "integer", tgt.Type) - assert.Equal(t, "int64", tgt.Format) - } - } - } -} - -func TestResolveLocalRef_PathItem(t *testing.T) { - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - var tgt PathItem - ref, err := NewRef("#/paths/" + jsonpointer.Escape("/pets/{id}")) - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, rootDoc.Paths.Paths["/pets/{id}"].Get, tgt.Get) - } - } - } -} - -func TestResolveLocalRef_Response(t *testing.T) { - rootDoc := new(Swagger) - b, err := ioutil.ReadFile("fixtures/specs/refed.json") - if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) { - 
var tgt Response - ref, err := NewRef("#/responses/petResponse") - if assert.NoError(t, err) { - resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil) - if assert.NoError(t, resolver.Resolve(&ref, &tgt)) { - assert.Equal(t, rootDoc.Responses["petResponse"], tgt) - } - } - } -} - -// PetStoreJSONMessage json raw message for Petstore20 -var PetStoreJSONMessage = json.RawMessage([]byte(PetStore20)) - -// PetStore20 json doc for swagger 2.0 pet store -const PetStore20 = `{ - "swagger": "2.0", - "info": { - "version": "1.0.0", - "title": "Swagger Petstore", - "contact": { - "name": "Wordnik API Team", - "url": "http://developer.wordnik.com" - }, - "license": { - "name": "Creative Commons 4.0 International", - "url": "http://creativecommons.org/licenses/by/4.0/" - } - }, - "host": "petstore.swagger.wordnik.com", - "basePath": "/api", - "schemes": [ - "http" - ], - "paths": { - "/pets": { - "get": { - "security": [ - { - "basic": [] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "getAllPets", - "parameters": [ - { - "name": "status", - "in": "query", - "description": "The status to filter by", - "type": "string" - }, - { - "name": "limit", - "in": "query", - "description": "The maximum number of results to return", - "type": "integer", - "format": "int64" - } - ], - "summary": "Finds all pets in the system", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Pet" - } - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "security": [ - { - "basic": [] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "createPet", - "summary": "Creates a new pet", - "consumes": ["application/x-yaml"], - "produces": ["application/x-yaml"], - "parameters": [ - { - "name": "pet", - "in": "body", - "description": "The Pet to create", - "required": true, - "schema": { - "$ref": "#/definitions/newPet" - } - } - ], - "responses": { - "200": { - "description": "Created Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/pets/{id}": { - "delete": { - "security": [ - { - "apiKey": [] - } - ], - "description": "Deletes the Pet by id", - "operationId": "deletePet", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet to delete", - "required": true, - "type": "integer", - "format": "int64" - } - ], - "responses": { - "204": { - "description": "pet deleted" - }, - "default": { - "description": "unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "get": { - "tags": [ "Pet Operations" ], - "operationId": "getPetById", - "summary": "Finds the pet by id", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet", - "required": true, - "type": "integer", - "format": "int64" - } - ] - } - }, - "definitions": { - "Category": { - "id": "Category", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Pet": { - "id": "Pet", - "properties": { - "category": { - "$ref": "#/definitions/Category" - }, - "id": { - "description": "unique 
identifier for the pet", - "format": "int64", - "maximum": 100.0, - "minimum": 0.0, - "type": "integer" - }, - "name": { - "type": "string" - }, - "photoUrls": { - "items": { - "type": "string" - }, - "type": "array" - }, - "status": { - "description": "pet status in the store", - "enum": [ - "available", - "pending", - "sold" - ], - "type": "string" - }, - "tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "type": "array" - } - }, - "required": [ - "id", - "name" - ] - }, - "newPet": { - "anyOf": [ - { - "$ref": "#/definitions/Pet" - }, - { - "required": [ - "name" - ] - } - ] - }, - "Tag": { - "id": "Tag", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Error": { - "required": [ - "code", - "message" - ], - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - } - } - } - }, - "consumes": [ - "application/json", - "application/xml" - ], - "produces": [ - "application/json", - "application/xml", - "text/plain", - "text/html" - ], - "securityDefinitions": { - "basic": { - "type": "basic" - }, - "apiKey": { - "type": "apiKey", - "in": "header", - "name": "X-API-KEY" - } - } -} -` diff --git a/vendor/github.com/go-openapi/spec/external_docs_test.go b/vendor/github.com/go-openapi/spec/external_docs_test.go deleted file mode 100644 index 14c5ef1561..0000000000 --- a/vendor/github.com/go-openapi/spec/external_docs_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "testing" -) - -func TestIntegrationExternalDocs(t *testing.T) { - var extDocs = ExternalDocumentation{"the name", "the url"} - const extDocsYAML = "description: the name\nurl: the url\n" - const extDocsJSON = `{"description":"the name","url":"the url"}` - assertSerializeJSON(t, extDocs, extDocsJSON) - assertSerializeYAML(t, extDocs, extDocsYAML) - assertParsesJSON(t, extDocsJSON, extDocs) - assertParsesYAML(t, extDocsYAML, extDocs) -} diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod new file mode 100644 index 0000000000..5af64c10b5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.mod @@ -0,0 +1,16 @@ +module github.com/go-openapi/spec + +require ( + github.com/PuerkitoBio/purell v1.1.0 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/jsonreference v0.17.0 + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum new file mode 100644 index 0000000000..ab6bfb6086 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.sum @@ -0,0 +1,22 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 
h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index 85c4d454c1..39efe452bb 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -22,6 +22,11 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header type HeaderProps struct { Description string `json:"description,omitempty"` } @@ -56,7 +61,7 @@ func (h *Header) Typed(tpe, format string) *Header { // CollectionOf a fluent builder method for an array item func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = "array" + h.Type = jsonArray h.Items = items h.CollectionFormat = format return h @@ -153,7 +158,7 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err @@ -164,32 +169,29 @@ func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &h.HeaderProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &h.HeaderProps) } // JSONLookup look up a value by the json property name -func (p Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { return &ex, nil } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err = jsonpointer.GetForToken(p.HeaderProps, token) + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) return r, err } diff --git a/vendor/github.com/go-openapi/spec/header_test.go b/vendor/github.com/go-openapi/spec/header_test.go deleted file mode 100644 index a07d174fd4..0000000000 --- a/vendor/github.com/go-openapi/spec/header_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -func float64Ptr(f float64) *float64 { - return &f -} -func int64Ptr(f int64) *int64 { - return &f -} - -var header = Header{ - VendorExtensible: VendorExtensible{Extensions: map[string]interface{}{ - "x-framework": "swagger-go", - }}, - HeaderProps: HeaderProps{Description: "the description of this header"}, - SimpleSchema: SimpleSchema{ - Items: &Items{ - Refable: Refable{Ref: MustCreateRef("Cat")}, - }, - Type: "string", - Format: "date", - Default: "8", - }, - CommonValidations: CommonValidations{ - Maximum: float64Ptr(100), - ExclusiveMaximum: true, - ExclusiveMinimum: true, - Minimum: float64Ptr(5), - MaxLength: int64Ptr(100), - MinLength: int64Ptr(5), - Pattern: "\\w{1,5}\\w+", - MaxItems: int64Ptr(100), - MinItems: int64Ptr(5), - UniqueItems: true, - MultipleOf: float64Ptr(5), - Enum: []interface{}{"hello", "world"}, - }, -} - -var headerJSON = `{ - "items": { - "$ref": "Cat" - }, - "x-framework": "swagger-go", - "description": "the description of this header", - "maximum": 100, - "minimum": 5, - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "maxLength": 100, - "minLength": 5, - "pattern": "\\w{1,5}\\w+", - "maxItems": 100, - "minItems": 5, - "uniqueItems": true, - "multipleOf": 5, - "enum": ["hello", "world"], - "type": "string", - "format": "date", - "default": "8" -}` - -func TestIntegrationHeader(t *testing.T) { - var actual Header - if assert.NoError(t, json.Unmarshal([]byte(headerJSON), &actual)) { - assert.EqualValues(t, actual, header) - } - - assertParsesJSON(t, headerJSON, header) -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index fb8b7c4ac5..c458b49b21 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -52,14 +52,14 @@ func (e Extensions) GetBool(key string) (bool, bool) { // GetStringSlice gets a string value from the extensions func (e Extensions) GetStringSlice(key string) ([]string, bool) { if v, ok := e[strings.ToLower(key)]; ok { - arr, ok := v.([]interface{}) - if !ok { + arr, isSlice := v.([]interface{}) + if !isSlice { return nil, false } var strs []string for _, iface := range arr { - str, ok := iface.(string) - if !ok { + str, isString := iface.(string) + if !isString { return nil, false } strs = append(strs, str) @@ -161,8 +161,5 @@ func (i *Info) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err } - if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &i.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/info_test.go b/vendor/github.com/go-openapi/spec/info_test.go deleted file mode 100644 index fc40c16303..0000000000 --- a/vendor/github.com/go-openapi/spec/info_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var infoJSON = `{ - "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", - "title": "Swagger Sample API", - "termsOfService": "http://helloreverb.com/terms/", - "contact": { - "name": "wordnik api team", - "url": "http://developer.wordnik.com" - }, - "license": { - "name": "Creative Commons 4.0 International", - "url": "http://creativecommons.org/licenses/by/4.0/" - }, - "version": "1.0.9-abcd", - "x-framework": "go-swagger" -}` - -var info = Info{ - InfoProps: InfoProps{ - Version: "1.0.9-abcd", - Title: "Swagger Sample API", - Description: "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", - TermsOfService: "http://helloreverb.com/terms/", - Contact: &ContactInfo{Name: "wordnik api team", URL: "http://developer.wordnik.com"}, - License: &License{Name: "Creative Commons 4.0 International", URL: "http://creativecommons.org/licenses/by/4.0/"}, - }, - VendorExtensible: VendorExtensible{map[string]interface{}{"x-framework": "go-swagger"}}, -} - -func TestIntegrationInfo_Serialize(t *testing.T) { - b, err := json.MarshalIndent(info, "", "\t") - if assert.NoError(t, err) { - assert.Equal(t, infoJSON, string(b)) - } -} - -func TestIntegrationInfo_Deserialize(t *testing.T) { - actual := Info{} - err := json.Unmarshal([]byte(infoJSON), &actual) - if assert.NoError(t, err) { - assert.EqualValues(t, info, actual) - } -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index 492423ef7f..78389317e4 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -22,6 +22,11 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers type SimpleSchema struct { Type string `json:"type,omitempty"` Format string `json:"format,omitempty"` @@ -31,6 +36,7 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// TypeName return the type (or format) of a simple schema func (s *SimpleSchema) TypeName() string { if s.Format != "" { return s.Format @@ -38,6 +44,7 @@ func (s *SimpleSchema) TypeName() string { return s.Type } +// ItemsTypeName yields the type of items in a simple schema array func (s *SimpleSchema) ItemsTypeName() string { if s.Items == nil { return "" @@ -45,6 +52,7 @@ func (s *SimpleSchema) ItemsTypeName() string { return s.Items.TypeName() } +// CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` @@ -85,7 +93,7 @@ func (i *Items) Typed(tpe, format string) *Items { // CollectionOf a fluent builder method for an array item func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = "array" + i.Type = jsonArray i.Items = items 
i.CollectionFormat = format return i @@ -212,18 +220,18 @@ func (i Items) MarshalJSON() ([]byte, error) { } // JSONLookup look up a value by the json property name -func (p Items) JSONLookup(token string) (interface{}, error) { - if token == "$ref" { - return &p.Ref, nil +func (i Items) JSONLookup(token string) (interface{}, error) { + if token == jsonRef { + return &i.Ref, nil } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) return r, err } diff --git a/vendor/github.com/go-openapi/spec/items_test.go b/vendor/github.com/go-openapi/spec/items_test.go deleted file mode 100644 index 2f9ac11b43..0000000000 --- a/vendor/github.com/go-openapi/spec/items_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var items = Items{ - Refable: Refable{Ref: MustCreateRef("Dog")}, - CommonValidations: CommonValidations{ - Maximum: float64Ptr(100), - ExclusiveMaximum: true, - ExclusiveMinimum: true, - Minimum: float64Ptr(5), - MaxLength: int64Ptr(100), - MinLength: int64Ptr(5), - Pattern: "\\w{1,5}\\w+", - MaxItems: int64Ptr(100), - MinItems: int64Ptr(5), - UniqueItems: true, - MultipleOf: float64Ptr(5), - Enum: []interface{}{"hello", "world"}, - }, - SimpleSchema: SimpleSchema{ - Type: "string", - Format: "date", - Items: &Items{ - Refable: Refable{Ref: MustCreateRef("Cat")}, - }, - CollectionFormat: "csv", - Default: "8", - }, -} - -var itemsJSON = `{ - "items": { - "$ref": "Cat" - }, - "$ref": "Dog", - "maximum": 100, - "minimum": 5, - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "maxLength": 100, - "minLength": 5, - "pattern": "\\w{1,5}\\w+", - "maxItems": 100, - "minItems": 5, - "uniqueItems": true, - "multipleOf": 5, - "enum": ["hello", "world"], - "type": "string", - "format": "date", - "collectionFormat": "csv", - "default": "8" -}` - -func TestIntegrationItems(t *testing.T) { - var actual Items - if assert.NoError(t, json.Unmarshal([]byte(itemsJSON), &actual)) { - assert.EqualValues(t, actual, items) - } - - assertParsesJSON(t, itemsJSON, items) -} diff --git a/vendor/github.com/go-openapi/spec/license_test.go b/vendor/github.com/go-openapi/spec/license_test.go deleted file mode 100644 index 8ed51a3529..0000000000 --- a/vendor/github.com/go-openapi/spec/license_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import "testing" - -func TestIntegrationLicense(t *testing.T) { - license := License{"the name", "the url"} - const licenseJSON = `{"name":"the name","url":"the url"}` - const licenseYAML = "name: the name\nurl: the url\n" - - assertSerializeJSON(t, license, licenseJSON) - assertSerializeYAML(t, license, licenseYAML) - assertParsesJSON(t, licenseJSON, license) - assertParsesYAML(t, licenseYAML, license) -} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 0000000000..b8957e7c0c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// normalize an absolute path for use as a cache key. +// On Windows, drive letters should be converted to lower case, as is already done for the scheme in net/url.URL. +func normalizeAbsPath(path string) string { + u, err := url.Parse(path) + if err != nil { + debugLog("normalize absolute path failed: %s", err) + return path + } + return u.String() +} + +// base or refPath could be a file path or a URL. +// Given a base absolute path and a ref path, return the absolute path of refPath: +// 1) if refPath is absolute, return it +// 2) if refPath is relative, join it with basePath, keeping the scheme, host, and port if they exist; +// base could be a directory or a full file path +func normalizePaths(refPath, base string) string { + refURL, _ := url.Parse(refPath) + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { + // refPath is actually absolute + if refURL.Host != "" { + return refPath + } + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result + } + + // relative refPath + baseURL, _ := url.Parse(base) + if !strings.HasPrefix(refPath, "#") { + // combining paths + if baseURL.Host != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } else { // base is a file + newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) + return newBase + } + + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + return baseURL.String() +}
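To make the two resolution rules documented above concrete, here is a sketch of how normalizePaths could be exercised from a package-internal test. The case table assumes Unix-style paths and is purely illustrative; it is not part of this patch:

package spec

import "testing"

// TestNormalizePathsSketch spells out the documented rules (assumed cases).
func TestNormalizePathsSketch(t *testing.T) {
	cases := []struct{ ref, base, want string }{
		// rule 1: an absolute URL ref is returned as-is
		{"http://host/spec.json#/definitions/Pet", "/tmp/base.json", "http://host/spec.json#/definitions/Pet"},
		// a fragment-only ref keeps the base and copies the fragment over
		{"#/definitions/Pet", "/tmp/base.json", "/tmp/base.json#/definitions/Pet"},
		// rule 2: a relative ref is joined with the base's directory
		{"item.json#/target", "/tmp/base.json", "/tmp/item.json#/target"},
	}
	for _, c := range cases {
		if got := normalizePaths(c.ref, c.base); got != c.want {
			t.Errorf("normalizePaths(%q, %q) = %q, want %q", c.ref, c.base, got, c.want)
		}
	}
}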
+// denormalizeFileRef returns the simplest notation for a file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. +// +// This is currently used when we rewrite a ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URLs (i.e. with a non-empty scheme) are left unchanged + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. different drives on windows): + // leave the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + +// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + // This is important for when the reference is pointing to the root schema + if ref.String() == "" { + r, _ := NewRef(relativeBase) + return &r + } + + debugLog("normalizing %s against %s", ref.String(), relativeBase) + + s := normalizePaths(ref.String(), relativeBase) + r, _ := NewRef(s) + return &r +} + +// absPath returns the absolute path of a file +func absPath(fname string) (string, error) { + if strings.HasPrefix(fname, "http") { + return fname, nil + } + if filepath.IsAbs(fname) { + return fname, nil + } + wd, err := os.Getwd() + return filepath.Join(wd, fname), err +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index de1db6f020..b1ebd59945 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -15,17 +15,31 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" + "sort" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present, must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss]
+ Schemes []string `json:"schemes,omitempty"` Tags []string `json:"tags,omitempty"` Summary string `json:"summary,omitempty"` ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` @@ -36,6 +50,31 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marshaller here to handle a special case related to +// the Security field. We need to preserve a zero-length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + // Operation describes a single API operation on a path. // // For more information: http://goo.gl/8us55a#operationObject @@ -50,11 +89,17 @@ func (o *Operation) SuccessResponse() (*Response, int, bool) { return nil, 0, false } - for k, v := range o.Responses.StatusCodeResponses { - if k/100 == 2 { - return &v, k, true + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) } } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } return o.Responses.Default, 0, false } @@ -73,10 +118,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &o.VendorExtensible) } // MarshalJSON converts this items object to JSON @@ -190,7 +232,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { // RemoveParam removes a parameter from the operation func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { - if p.Name == name && p.In == name { + if p.Name == name && p.In == in { o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) return o } }
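The effect of this special-casing of Security (for JSON here, and for gob in the helpers added in the hunk below) is easiest to see from consumer code. A minimal sketch, assuming the canonical import path github.com/go-openapi/spec; this program is illustrative and not part of the patch:

package main

import (
	"bytes"
	"encoding/gob"
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var op spec.Operation

	// nil Security: the "security" key is omitted entirely.
	b, _ := json.Marshal(op)
	fmt.Println(string(b)) // {}

	// Empty but non-nil Security: kept as "security":[], meaning
	// "no security required" rather than "security unspecified".
	op.Security = []map[string][]string{}
	b, _ = json.Marshal(op)
	fmt.Println(string(b)) // {"security":[]}

	// The gob round-trip (see GobEncode/GobDecode below) preserves the
	// same distinction via the SecurityIsEmpty flag.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(op); err != nil {
		panic(err)
	}
	var out spec.Operation
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Security != nil, len(out.Security)) // true 0
}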
@@ -231,3 +273,126 @@ func (o *Operation) RespondsWith(code int, response *Response) *Operation { o.Responses.StatusCodeResponses[code] = *response return o } + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/operation_test.go b/vendor/github.com/go-openapi/spec/operation_test.go deleted file mode 100644 index 113bf6c065..0000000000 --- a/vendor/github.com/go-openapi/spec/operation_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var operation = Operation{ - VendorExtensible: VendorExtensible{ - Extensions: map[string]interface{}{ - "x-framework": "go-swagger", - }, - }, - OperationProps: OperationProps{ - Description: "operation description", - Consumes: []string{"application/json", "application/x-yaml"}, - Produces: []string{"application/json", "application/x-yaml"}, - Schemes: []string{"http", "https"}, - Tags: []string{"dogs"}, - Summary: "the summary of the operation", - ID: "sendCat", - Deprecated: true, - Security: []map[string][]string{ - map[string][]string{ - "apiKey": []string{}, - }, - }, - Parameters: []Parameter{ - Parameter{Refable: Refable{Ref: MustCreateRef("Cat")}}, - }, - Responses: &Responses{ - ResponsesProps: ResponsesProps{ - Default: &Response{ - ResponseProps: ResponseProps{ - Description: "void response", - }, - }, - }, - }, - }, -} - -var operationJSON = `{ - "description": "operation description", - "x-framework": "go-swagger", - "consumes": [ "application/json", "application/x-yaml" ], - "produces": [ "application/json", "application/x-yaml" ], - "schemes": ["http", "https"], - "tags": ["dogs"], - "summary": "the summary of the operation", - "operationId": "sendCat", - "deprecated": true, - "security": [ { "apiKey": [] } ], - "parameters": [{"$ref":"Cat"}], - "responses": { - "default": { - "description": "void response" - } - } -}` - -func TestIntegrationOperation(t *testing.T) { - var actual Operation - if assert.NoError(t, json.Unmarshal([]byte(operationJSON), &actual)) { - assert.EqualValues(t, actual, operation) - } - - assertParsesJSON(t, operationJSON, operation) -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 71aee1e806..cecdff5456 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -39,7 +39,8 @@ func PathParam(name string) *Parameter { // BodyParam creates a body parameter func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} } // FormDataParam creates a body parameter @@ -49,12 +50,15 @@ func FormDataParam(name string) *Parameter { // FileParam creates a body parameter func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} } // SimpleArrayParam creates a param for a simple array (string, int, date etc) func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: 
SimpleSchema{Type: "string", Format: fmt}}}} + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} } // ParamRef creates a parameter that's a json reference @@ -64,25 +68,44 @@ func ParamRef(uri string) *Parameter { return p } +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` In string `json:"in,omitempty"` Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// * Body - The payload that's appended to the HTTP request. 
Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. // // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -98,7 +121,7 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } @@ -147,7 +170,7 @@ func (p *Parameter) Typed(tpe, format string) *Parameter { // CollectionOf a fluent builder method for an array parameter func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" + p.Type = jsonArray p.Items = items p.CollectionFormat = format return p @@ -269,10 +292,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.ParamProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/parameters_test.go b/vendor/github.com/go-openapi/spec/parameters_test.go deleted file mode 100644 index 424f663329..0000000000 --- a/vendor/github.com/go-openapi/spec/parameters_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var parameter = Parameter{ - VendorExtensible: VendorExtensible{Extensions: map[string]interface{}{ - "x-framework": "swagger-go", - }}, - Refable: Refable{Ref: MustCreateRef("Dog")}, - CommonValidations: CommonValidations{ - Maximum: float64Ptr(100), - ExclusiveMaximum: true, - ExclusiveMinimum: true, - Minimum: float64Ptr(5), - MaxLength: int64Ptr(100), - MinLength: int64Ptr(5), - Pattern: "\\w{1,5}\\w+", - MaxItems: int64Ptr(100), - MinItems: int64Ptr(5), - UniqueItems: true, - MultipleOf: float64Ptr(5), - Enum: []interface{}{"hello", "world"}, - }, - SimpleSchema: SimpleSchema{ - Type: "string", - Format: "date", - CollectionFormat: "csv", - Items: &Items{ - Refable: Refable{Ref: MustCreateRef("Cat")}, - }, - Default: "8", - }, - ParamProps: ParamProps{ - Name: "param-name", - In: "header", - Required: true, - Schema: &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - Description: "the description of this parameter", - }, -} - -var parameterJSON = `{ - "items": { - "$ref": "Cat" - }, - "x-framework": "swagger-go", - "$ref": "Dog", - "description": "the description of this parameter", - "maximum": 100, - "minimum": 5, - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "maxLength": 100, - "minLength": 5, - "pattern": "\\w{1,5}\\w+", - "maxItems": 100, - "minItems": 5, - "uniqueItems": true, - "multipleOf": 5, - "enum": ["hello", "world"], - "type": "string", - "format": "date", - "name": "param-name", - "in": "header", - "required": true, - "schema": { - "type": "string" - }, - "collectionFormat": "csv", - "default": "8" -}` - -func TestIntegrationParameter(t *testing.T) { - var actual Parameter - if assert.NoError(t, json.Unmarshal([]byte(parameterJSON), &actual)) { - assert.EqualValues(t, actual, parameter) - } - - assertParsesJSON(t, parameterJSON, parameter) -} - -func TestParameterSerialization(t *testing.T) { - items := &Items{ - SimpleSchema: SimpleSchema{Type: "string"}, - } - - intItems := &Items{ - SimpleSchema: SimpleSchema{Type: "int", Format: "int32"}, - } - - assertSerializeJSON(t, QueryParam("").Typed("string", ""), `{"type":"string","in":"query"}`) - - assertSerializeJSON(t, - QueryParam("").CollectionOf(items, "multi"), - `{"type":"array","items":{"type":"string"},"collectionFormat":"multi","in":"query"}`) - - assertSerializeJSON(t, PathParam("").Typed("string", ""), `{"type":"string","in":"path","required":true}`) - - assertSerializeJSON(t, - PathParam("").CollectionOf(items, "multi"), - `{"type":"array","items":{"type":"string"},"collectionFormat":"multi","in":"path","required":true}`) - - assertSerializeJSON(t, - PathParam("").CollectionOf(intItems, "multi"), - `{"type":"array","items":{"type":"int","format":"int32"},"collectionFormat":"multi","in":"path","required":true}`) - - assertSerializeJSON(t, HeaderParam("").Typed("string", ""), `{"type":"string","in":"header","required":true}`) - - assertSerializeJSON(t, - HeaderParam("").CollectionOf(items, "multi"), - `{"type":"array","items":{"type":"string"},"collectionFormat":"multi","in":"header","required":true}`) - schema := &Schema{SchemaProps: SchemaProps{ - Properties: map[string]Schema{ - "name": Schema{SchemaProps: SchemaProps{ - Type: []string{"string"}, - }}, - }, - }} - - refSchema := &Schema{ - SchemaProps: SchemaProps{Ref: MustCreateRef("Cat")}, - } - - assertSerializeJSON(t, - BodyParam("", schema), - 
`{"type":"object","in":"body","schema":{"properties":{"name":{"type":"string"}}}}`) - - assertSerializeJSON(t, - BodyParam("", refSchema), - `{"type":"object","in":"body","schema":{"$ref":"Cat"}}`) - - // array body param - assertSerializeJSON(t, - BodyParam("", ArrayProperty(RefProperty("Cat"))), - `{"type":"object","in":"body","schema":{"type":"array","items":{"$ref":"Cat"}}}`) - -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index 9ab3ec5383..68fc8e9014 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -21,7 +21,7 @@ import ( "github.com/go-openapi/swag" ) -// pathItemProps the path item specific properties +// PathItemProps the path item specific properties type PathItemProps struct { Get *Operation `json:"get,omitempty"` Put *Operation `json:"put,omitempty"` @@ -50,7 +50,7 @@ func (p PathItem) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) @@ -65,10 +65,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.PathItemProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/path_item_test.go b/vendor/github.com/go-openapi/spec/path_item_test.go deleted file mode 100644 index ea77e6a9b3..0000000000 --- a/vendor/github.com/go-openapi/spec/path_item_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var pathItem = PathItem{ - Refable: Refable{Ref: MustCreateRef("Dog")}, - VendorExtensible: VendorExtensible{ - Extensions: map[string]interface{}{ - "x-framework": "go-swagger", - }, - }, - PathItemProps: PathItemProps{ - Get: &Operation{ - OperationProps: OperationProps{Description: "get operation description"}, - }, - Put: &Operation{ - OperationProps: OperationProps{Description: "put operation description"}, - }, - Post: &Operation{ - OperationProps: OperationProps{Description: "post operation description"}, - }, - Delete: &Operation{ - OperationProps: OperationProps{Description: "delete operation description"}, - }, - Options: &Operation{ - OperationProps: OperationProps{Description: "options operation description"}, - }, - Head: &Operation{ - OperationProps: OperationProps{Description: "head operation description"}, - }, - Patch: &Operation{ - OperationProps: OperationProps{Description: "patch operation description"}, - }, - Parameters: []Parameter{ - Parameter{ - ParamProps: ParamProps{In: "path"}, - }, - }, - }, -} - -var pathItemJSON = `{ - "$ref": "Dog", - "x-framework": "go-swagger", - "get": { "description": "get operation description" }, - "put": { "description": "put operation description" }, - "post": { "description": "post operation description" }, - "delete": { "description": "delete operation description" }, - "options": { "description": "options operation description" }, - "head": { "description": "head operation description" }, - "patch": { "description": "patch operation description" }, - "parameters": [{"in":"path"}] -}` - -func TestIntegrationPathItem(t *testing.T) { - var actual PathItem - if assert.NoError(t, json.Unmarshal([]byte(pathItemJSON), &actual)) { - assert.EqualValues(t, actual, pathItem) - } - - assertParsesJSON(t, pathItemJSON, pathItem) -} diff --git a/vendor/github.com/go-openapi/spec/paths_test.go b/vendor/github.com/go-openapi/spec/paths_test.go deleted file mode 100644 index 5ccfd4a0aa..0000000000 --- a/vendor/github.com/go-openapi/spec/paths_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var paths = Paths{ - VendorExtensible: VendorExtensible{Extensions: map[string]interface{}{"x-framework": "go-swagger"}}, - Paths: map[string]PathItem{ - "/": PathItem{ - Refable: Refable{Ref: MustCreateRef("cats")}, - }, - }, -} - -var pathsJSON = `{"x-framework":"go-swagger","/":{"$ref":"cats"}}` - -func TestIntegrationPaths(t *testing.T) { - var actual Paths - if assert.NoError(t, json.Unmarshal([]byte(pathsJSON), &actual)) { - assert.EqualValues(t, actual, paths) - } - - assertParsesJSON(t, pathsJSON, paths) - -} diff --git a/vendor/github.com/go-openapi/spec/properties_test.go b/vendor/github.com/go-openapi/spec/properties_test.go deleted file mode 100644 index 90bd32c9e9..0000000000 --- a/vendor/github.com/go-openapi/spec/properties_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "testing" -) - -func TestPropertySerialization(t *testing.T) { - strProp := StringProperty() - strProp.Enum = append(strProp.Enum, "a", "b") - - prop := &Schema{SchemaProps: SchemaProps{ - Items: &SchemaOrArray{Schemas: []Schema{ - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - }}, - }} - - var propSerData = []struct { - Schema *Schema - JSON string - }{ - {BooleanProperty(), `{"type":"boolean"}`}, - {DateProperty(), `{"type":"string","format":"date"}`}, - {DateTimeProperty(), `{"type":"string","format":"date-time"}`}, - {Float64Property(), `{"type":"number","format":"double"}`}, - {Float32Property(), `{"type":"number","format":"float"}`}, - {Int32Property(), `{"type":"integer","format":"int32"}`}, - {Int64Property(), `{"type":"integer","format":"int64"}`}, - {MapProperty(StringProperty()), `{"type":"object","additionalProperties":{"type":"string"}}`}, - {MapProperty(Int32Property()), `{"type":"object","additionalProperties":{"type":"integer","format":"int32"}}`}, - {RefProperty("Dog"), `{"$ref":"Dog"}`}, - {StringProperty(), `{"type":"string"}`}, - {strProp, `{"type":"string","enum":["a","b"]}`}, - {ArrayProperty(StringProperty()), `{"type":"array","items":{"type":"string"}}`}, - {prop, `{"items":[{"type":"string"},{"type":"string"}]}`}, - } - - for _, v := range propSerData { - t.Log("roundtripping for", v.JSON) - assertSerializeJSON(t, v.Schema, v.JSON) - assertParsesJSON(t, v.JSON, v.Schema) - } - -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 4833b87e2f..08ff869b2f 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "net/http" "os" @@ -145,7 +147,32 @@ func (r *Ref) UnmarshalJSON(d []byte) error { if err := json.Unmarshal(d, &v); err != nil { return err } + 
return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} +func (r *Ref) fromMap(v map[string]interface{}) error { if v == nil { return nil } diff --git a/vendor/github.com/go-openapi/spec/refmodifier.go b/vendor/github.com/go-openapi/spec/refmodifier.go deleted file mode 100644 index 8482608ea7..0000000000 --- a/vendor/github.com/go-openapi/spec/refmodifier.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" -) - -func modifyItemsRefs(target *Schema, basePath string) { - if target.Items != nil { - if target.Items.Schema != nil { - modifyRefs(target.Items.Schema, basePath) - } - for i := range target.Items.Schemas { - s := target.Items.Schemas[i] - modifyRefs(&s, basePath) - target.Items.Schemas[i] = s - } - } -} - -func modifyRefs(target *Schema, basePath string) { - if target.Ref.String() != "" { - if target.Ref.RemoteURI() == basePath { - return - } - newURL := fmt.Sprintf("%s%s", basePath, target.Ref.String()) - target.Ref, _ = NewRef(newURL) - } - - modifyItemsRefs(target, basePath) - for i := range target.AllOf { - modifyRefs(&target.AllOf[i], basePath) - } - for i := range target.AnyOf { - modifyRefs(&target.AnyOf[i], basePath) - } - for i := range target.OneOf { - modifyRefs(&target.OneOf[i], basePath) - } - if target.Not != nil { - modifyRefs(target.Not, basePath) - } - for k := range target.Properties { - s := target.Properties[k] - modifyRefs(&s, basePath) - target.Properties[k] = s - } - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - modifyRefs(target.AdditionalProperties.Schema, basePath) - } - for k := range target.PatternProperties { - s := target.PatternProperties[k] - modifyRefs(&s, basePath) - target.PatternProperties[k] = s - } - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - modifyRefs(target.Dependencies[k].Schema, basePath) - } - } - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - modifyRefs(target.AdditionalItems.Schema, basePath) - } - for k := range target.Definitions { - s := target.Definitions[k] - modifyRefs(&s, basePath) - target.Definitions[k] = s - } -} diff --git a/vendor/github.com/go-openapi/spec/refmodifier_test.go b/vendor/github.com/go-openapi/spec/refmodifier_test.go deleted file mode 100644 index c456cb43fe..0000000000 --- a/vendor/github.com/go-openapi/spec/refmodifier_test.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 
2017 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var testJsonSchema = `{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uri" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - 
"uniqueItems": true - } - ] - }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} -` - -var modifiedTestJsonSchema = `{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uri" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, - "minLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "" }, - { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, - "minItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, - "minProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "" }, - { "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "http://json-schema.org/draft-04/schema#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": 
"http://json-schema.org/draft-04/schema#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" }, - "anyOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" }, - "oneOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" }, - "not": { "$ref": "" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} -` - -func TestRefModifier(t *testing.T) { - sch := Schema{} - assert.NoError(t, json.Unmarshal([]byte(testJsonSchema), &sch)) - modifyRefs(&sch, "http://json-schema.org/draft-04/schema") - b, err := sch.MarshalJSON() - assert.NoError(t, err) - assert.JSONEq(t, modifiedTestJsonSchema, string(b)) -} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index a32b039eaf..27729c1d93 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -39,15 +39,15 @@ type Response struct { } // JSONLookup look up a value by the json property name -func (p Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { return &ex, nil } if token == "$ref" { - return &p.Ref, nil + return &r.Ref, nil } - r, _, err := jsonpointer.GetForToken(p.ResponseProps, token) - return r, err + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -58,10 +58,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &r.VendorExtensible) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/response_test.go b/vendor/github.com/go-openapi/spec/response_test.go deleted file mode 100644 index 2a3ca40935..0000000000 --- a/vendor/github.com/go-openapi/spec/response_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var response = Response{ - Refable: Refable{Ref: MustCreateRef("Dog")}, - VendorExtensible: VendorExtensible{ - Extensions: map[string]interface{}{ - "x-go-name": "PutDogExists", - }, - }, - ResponseProps: ResponseProps{ - Description: "Dog exists", - Schema: &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - }, -} - -var responseJSON = `{ - "$ref": "Dog", - "x-go-name": "PutDogExists", - "description": "Dog exists", - "schema": { - "type": "string" - } -}` - -func TestIntegrationResponse(t *testing.T) { - var actual Response - if assert.NoError(t, json.Unmarshal([]byte(responseJSON), &actual)) { - assert.EqualValues(t, actual, response) - } - - assertParsesJSON(t, responseJSON, response) -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 3ab06697f2..4efb6f868b 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -85,11 +85,15 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +// ResponsesProps describes all responses for an operation. +// It holds the default response and maps all other responses to an +// HTTP status code. type ResponsesProps struct { Default *Response StatusCodeResponses map[int]Response } +// MarshalJSON marshals responses as JSON func (r ResponsesProps) MarshalJSON() ([]byte, error) { toser := map[string]Response{} if r.Default != nil { @@ -101,6 +105,7 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } +// UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { var res map[string]Response if err := json.Unmarshal(data, &res); err != nil { diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index 1cdcc163f1..ce30d26e32 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -89,7 +89,8 @@ func DateTimeProperty() *Schema { // MapProperty creates a map property func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} } // RefProperty creates a ref property @@ -135,6 +136,10 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &v); err != nil { return err } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { if v == nil { return nil } @@ -151,54 +156,7 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error { return nil } -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps)
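As the new doc comments state, ResponsesProps folds the default response and the per-status-code responses into one JSON object. A small sketch of the resulting shape (hypothetical consumer code, assuming the canonical import path github.com/go-openapi/spec; not part of this patch):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ok := spec.Response{ResponseProps: spec.ResponseProps{Description: "ok"}}
	oops := spec.Response{ResponseProps: spec.ResponseProps{Description: "unexpected error"}}

	// The default response is serialized under the "default" key; each
	// status-code response under its numeric key.
	r := spec.Responses{ResponsesProps: spec.ResponsesProps{
		Default:             &oops,
		StatusCodeResponses: map[int]spec.Response{200: ok},
	}}

	b, _ := json.Marshal(r)
	fmt.Println(string(b))
	// {"200":{"description":"ok"},"default":{"description":"unexpected error"}}
}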
-// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - +// SchemaProps describes a JSON schema (draft 4) type SchemaProps struct { ID string `json:"id,omitempty"` Ref Ref `json:"-"` @@ -236,6 +194,7 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` ReadOnly bool `json:"readOnly,omitempty"` @@ -345,7 +304,7 @@ func (s *Schema) AddType(tpe, format string) *Schema { // CollectionOf a fluent builder method for an array parameter func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{"array"} + s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } @@ -582,18 +541,17 @@ func (s Schema) MarshalJSON() ([]byte, error) { // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { - var sch Schema - if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.Ref); err != nil { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { return err } - if err := json.Unmarshal(data, &sch.Schema); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil { - return err + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, } var d map[string]interface{} @@ -601,6 +559,9 @@ func (s *Schema) UnmarshalJSON(data []byte) error { return err } + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + delete(d, "$ref") delete(d, "$schema") for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 0000000000..c34a96fa04 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext + loadDoc func(string) (json.RawMessage, error) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return r, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r, nil + } + + // Set a new root to resolve against + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) + if err != nil { + return nil, err + } + + return resolver, nil +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + refURL := ref.GetURL() + if refURL == nil { + return nil + } + + var res interface{} + var data interface{} + var err error + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. 
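PathLoader, declared near the top of this new file, is a package-level hook: init() installs a default that fetches documents with swag.LoadFromFileOrHTTP, but because it is an exported variable a consumer can replace it, for instance to resolve $ref targets from in-memory fixtures in tests. A sketch of such an override (the fixture path and content are made up for illustration; the canonical import path is used here rather than the vendored github.com one):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// In-memory documents standing in for files or HTTP resources.
	fixtures := map[string]json.RawMessage{
		"fixtures/pet.json": json.RawMessage(`{"definitions":{"Pet":{"type":"object"}}}`),
	}

	// Replace the package-level loader before any resolution happens.
	spec.PathLoader = func(path string) (json.RawMessage, error) {
		if doc, ok := fixtures[path]; ok {
			return doc, nil
		}
		return nil, fmt.Errorf("no fixture for %q", path)
	}

	doc, err := spec.PathLoader("fixtures/pet.json")
	fmt.Println(string(doc), err) // {"definitions":{"Pet":{"type":"object"}}} <nil>
}

Note that the hook is global, so overriding it affects every resolution in the process; tests that do this should restore the previous value afterwards.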
+ root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeFileRef(ref, basePath) + debugLog("current ref is: %s", ref.String()) + debugLog("current ref normalized file: %s", baseRef.String()) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) + if !fromCache { + b, err := r.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, data) + } + + return data, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target +// Resolve is not in charge of following references, it only resolves ref by following its URL +// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them +// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("deref: unsupported type %T", input) + } + + curRef := ref.String() + if curRef != "" { + normalizedRef := normalizeFileRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
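isCircular above combines two checks: the chain of parent refs for the walk currently in progress, and the context-wide circulars memo, which (as the resolverContext comment explains) is what catches refs participating in several distinct cycles. A simplified standalone sketch of that two-level check (the real code also normalizes paths first and compares case-insensitively via swag.ContainsStringsCI):

package main

import "fmt"

// detector mirrors schemaLoader.isCircular: a memo of refs already
// proven circular (shared across the whole walk) plus the parent-ref
// chain of the current path.
type detector struct {
	circulars map[string]bool
}

func (d *detector) isCircular(ref string, parentRefs ...string) bool {
	if d.circulars[ref] {
		return true // already detected in another explored cycle
	}
	for _, p := range parentRefs {
		if p == ref {
			d.circulars[ref] = true // remember it for later shortcuts
			return true
		}
	}
	return false
}

func main() {
	d := &detector{circulars: map[string]bool{}}
	fmt.Println(d.isCircular("a.json#/A", "a.json#/B"))              // false: no cycle yet
	fmt.Println(d.isCircular("a.json#/A", "a.json#/B", "a.json#/A")) // true: A -> B -> A
	fmt.Println(d.isCircular("a.json#/A"))                           // true: via the shared memo
}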
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_test.go b/vendor/github.com/go-openapi/spec/schema_test.go deleted file mode 100644 index 2e49fbc159..0000000000 --- a/vendor/github.com/go-openapi/spec/schema_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
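shouldStopOnError above gives ExpandOptions.ContinueOnError its meaning: when the flag is set, a resolution failure is only logged and the walk goes on; otherwise the first error aborts. A hedged sketch of how that surfaces through the package's exported ExpandSpec entry point (the unresolvable $ref is deliberate, and the exact logging output depends on the loader in use):

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A document with a $ref that cannot be resolved.
	doc := new(spec.Swagger)
	if err := doc.UnmarshalJSON([]byte(`{
		"swagger": "2.0",
		"paths": {},
		"definitions": {"Broken": {"$ref": "missing.json#/definitions/Nope"}}
	}`)); err != nil {
		panic(err)
	}

	// With ContinueOnError the load failure is logged by
	// shouldStopOnError and expansion proceeds; with the flag unset,
	// the same call would return the resolution error instead.
	err := spec.ExpandSpec(doc, &spec.ExpandOptions{ContinueOnError: true})
	fmt.Println("expand error:", err)
}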
- -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -var schema = Schema{ - VendorExtensible: VendorExtensible{Extensions: map[string]interface{}{"x-framework": "go-swagger"}}, - SchemaProps: SchemaProps{ - Ref: MustCreateRef("Cat"), - Type: []string{"string"}, - Format: "date", - Description: "the description of this schema", - Title: "the title", - Default: "blah", - Maximum: float64Ptr(100), - ExclusiveMaximum: true, - ExclusiveMinimum: true, - Minimum: float64Ptr(5), - MaxLength: int64Ptr(100), - MinLength: int64Ptr(5), - Pattern: "\\w{1,5}\\w+", - MaxItems: int64Ptr(100), - MinItems: int64Ptr(5), - UniqueItems: true, - MultipleOf: float64Ptr(5), - Enum: []interface{}{"hello", "world"}, - MaxProperties: int64Ptr(5), - MinProperties: int64Ptr(1), - Required: []string{"id", "name"}, - Items: &SchemaOrArray{Schema: &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}}, - AllOf: []Schema{Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}}, - Properties: map[string]Schema{ - "id": Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}, - "name": Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - }, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: &Schema{SchemaProps: SchemaProps{ - Type: []string{"integer"}, - Format: "int32", - }}}, - }, - SwaggerSchemaProps: SwaggerSchemaProps{ - Discriminator: "not this", - ReadOnly: true, - XML: &XMLObject{"sch", "io", "sw", true, true}, - ExternalDocs: &ExternalDocumentation{ - Description: "the documentation etc", - URL: "http://readthedocs.org/swagger", - }, - Example: []interface{}{ - map[string]interface{}{ - "id": 1, - "name": "a book", - }, - map[string]interface{}{ - "id": 2, - "name": "the thing", - }, - }, - }, -} - -var schemaJSON = `{ - "x-framework": "go-swagger", - "$ref": "Cat", - "description": "the description of this schema", - "maximum": 100, - "minimum": 5, - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "maxLength": 100, - "minLength": 5, - "pattern": "\\w{1,5}\\w+", - "maxItems": 100, - "minItems": 5, - "uniqueItems": true, - "multipleOf": 5, - "enum": ["hello", "world"], - "type": "string", - "format": "date", - "title": "the title", - "default": "blah", - "maxProperties": 5, - "minProperties": 1, - "required": ["id", "name"], - "items": { - "type": "string" - }, - "allOf": [ - { - "type": "string" - } - ], - "properties": { - "id": { - "type": "integer", - "format": "int64" - }, - "name": { - "type": "string" - } - }, - "discriminator": "not this", - "readOnly": true, - "xml": { - "name": "sch", - "namespace": "io", - "prefix": "sw", - "wrapped": true, - "attribute": true - }, - "externalDocs": { - "description": "the documentation etc", - "url": "http://readthedocs.org/swagger" - }, - "example": [ - { - "id": 1, - "name": "a book" - }, - { - "id": 2, - "name": "the thing" - } - ], - "additionalProperties": { - "type": "integer", - "format": "int32" - } -} -` - -func TestSchema(t *testing.T) { - - expected := map[string]interface{}{} - json.Unmarshal([]byte(schemaJSON), &expected) - b, err := json.Marshal(schema) - if assert.NoError(t, err) { - var actual map[string]interface{} - json.Unmarshal(b, &actual) - assert.Equal(t, expected, actual) - } - - actual2 := Schema{} - if assert.NoError(t, json.Unmarshal([]byte(schemaJSON), &actual2)) { - assert.Equal(t, schema.Ref, actual2.Ref) - assert.Equal(t, schema.Description, actual2.Description) - assert.Equal(t, schema.Maximum, actual2.Maximum) - 
assert.Equal(t, schema.Minimum, actual2.Minimum) - assert.Equal(t, schema.ExclusiveMinimum, actual2.ExclusiveMinimum) - assert.Equal(t, schema.ExclusiveMaximum, actual2.ExclusiveMaximum) - assert.Equal(t, schema.MaxLength, actual2.MaxLength) - assert.Equal(t, schema.MinLength, actual2.MinLength) - assert.Equal(t, schema.Pattern, actual2.Pattern) - assert.Equal(t, schema.MaxItems, actual2.MaxItems) - assert.Equal(t, schema.MinItems, actual2.MinItems) - assert.True(t, actual2.UniqueItems) - assert.Equal(t, schema.MultipleOf, actual2.MultipleOf) - assert.Equal(t, schema.Enum, actual2.Enum) - assert.Equal(t, schema.Type, actual2.Type) - assert.Equal(t, schema.Format, actual2.Format) - assert.Equal(t, schema.Title, actual2.Title) - assert.Equal(t, schema.MaxProperties, actual2.MaxProperties) - assert.Equal(t, schema.MinProperties, actual2.MinProperties) - assert.Equal(t, schema.Required, actual2.Required) - assert.Equal(t, schema.Items, actual2.Items) - assert.Equal(t, schema.AllOf, actual2.AllOf) - assert.Equal(t, schema.Properties, actual2.Properties) - assert.Equal(t, schema.Discriminator, actual2.Discriminator) - assert.Equal(t, schema.ReadOnly, actual2.ReadOnly) - assert.Equal(t, schema.XML, actual2.XML) - assert.Equal(t, schema.ExternalDocs, actual2.ExternalDocs) - assert.Equal(t, schema.AdditionalProperties, actual2.AdditionalProperties) - assert.Equal(t, schema.Extensions, actual2.Extensions) - examples := actual2.Example.([]interface{}) - expEx := schema.Example.([]interface{}) - ex1 := examples[0].(map[string]interface{}) - ex2 := examples[1].(map[string]interface{}) - exp1 := expEx[0].(map[string]interface{}) - exp2 := expEx[1].(map[string]interface{}) - - assert.EqualValues(t, exp1["id"], ex1["id"]) - assert.Equal(t, exp1["name"], ex1["name"]) - assert.EqualValues(t, exp2["id"], ex2["id"]) - assert.Equal(t, exp2["name"], ex2["name"]) - } - -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 22d4f10af2..fe353842a6 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -78,6 +78,7 @@ func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { }} } +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section type SecuritySchemeProps struct { Description string `json:"description,omitempty"` Type string `json:"type"` @@ -135,8 +136,5 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &s.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/structs_test.go b/vendor/github.com/go-openapi/spec/structs_test.go deleted file mode 100644 index bfa59ee03a..0000000000 --- a/vendor/github.com/go-openapi/spec/structs_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v2" -) - -func assertSerializeJSON(t testing.TB, actual interface{}, expected string) bool { - ser, err := json.Marshal(actual) - if err != nil { - return assert.Fail(t, "unable to marshal to json (%s): %#v", err, actual) - } - return assert.Equal(t, string(ser), expected) -} - -func assertParsesJSON(t testing.TB, actual string, expected interface{}) bool { - tpe := reflect.TypeOf(expected) - var pointed bool - if tpe.Kind() == reflect.Ptr { - tpe = tpe.Elem() - pointed = true - } - - parsed := reflect.New(tpe) - err := json.Unmarshal([]byte(actual), parsed.Interface()) - if err != nil { - return assert.Fail(t, "unable to unmarshal from json (%s): %s", err, actual) - } - act := parsed.Interface() - if !pointed { - act = reflect.Indirect(parsed).Interface() - } - return assert.Equal(t, act, expected) -} - -func assertSerializeYAML(t testing.TB, actual interface{}, expected string) bool { - ser, err := yaml.Marshal(actual) - if err != nil { - return assert.Fail(t, "unable to marshal to yaml (%s): %#v", err, actual) - } - return assert.Equal(t, string(ser), expected) -} - -func assertParsesYAML(t testing.TB, actual string, expected interface{}) bool { - tpe := reflect.TypeOf(expected) - var pointed bool - if tpe.Kind() == reflect.Ptr { - tpe = tpe.Elem() - pointed = true - } - parsed := reflect.New(tpe) - err := yaml.Unmarshal([]byte(actual), parsed.Interface()) - if err != nil { - return assert.Fail(t, "unable to unmarshal from yaml (%s): %s", err, actual) - } - act := parsed.Interface() - if !pointed { - act = reflect.Indirect(parsed).Interface() - } - return assert.EqualValues(t, act, expected) -} - -func TestSerialization_SerializeJSON(t *testing.T) { - assertSerializeJSON(t, []string{"hello"}, "[\"hello\"]") - assertSerializeJSON(t, []string{"hello", "world", "and", "stuff"}, "[\"hello\",\"world\",\"and\",\"stuff\"]") - assertSerializeJSON(t, StringOrArray(nil), "null") - assertSerializeJSON(t, SchemaOrArray{Schemas: []Schema{Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}}}, "[{\"type\":\"string\"}]") - assertSerializeJSON(t, SchemaOrArray{ - Schemas: []Schema{ - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - }}, "[{\"type\":\"string\"},{\"type\":\"string\"}]") - assertSerializeJSON(t, SchemaOrArray{}, "null") -} - -func TestSerialization_DeserializeJSON(t *testing.T) { - // String - assertParsesJSON(t, "\"hello\"", StringOrArray([]string{"hello"})) - assertParsesJSON(t, "[\"hello\",\"world\",\"and\",\"stuff\"]", StringOrArray([]string{"hello", "world", "and", "stuff"})) - assertParsesJSON(t, "[\"hello\",\"world\",null,\"stuff\"]", StringOrArray([]string{"hello", "world", "", "stuff"})) - assertParsesJSON(t, "null", StringOrArray(nil)) - - // Schema - assertParsesJSON(t, "{\"type\":\"string\"}", SchemaOrArray{Schema: &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}}) - assertParsesJSON(t, "[{\"type\":\"string\"},{\"type\":\"string\"}]", 
&SchemaOrArray{ - Schemas: []Schema{ - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}, - }, - }) - assertParsesJSON(t, "null", SchemaOrArray{}) -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 23780c78a2..454617e585 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -24,7 +24,8 @@ import ( ) // Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. // // For more information: http://goo.gl/8us55a#swagger-object- type Swagger struct { @@ -67,16 +68,22 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Schemes []string `json:"schemes,omitempty"` Swagger string `json:"swagger,omitempty"` Info *Info `json:"info,omitempty"` Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` @@ -243,9 +250,9 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { if single == nil { return nil } - switch single.(type) { + switch v := single.(type) { case string: - *s = StringOrArray([]string{single.(string)}) + *s = StringOrArray([]string{v}) return nil default: return fmt.Errorf("only string or array is allowed, not %T", single) diff --git a/vendor/github.com/go-openapi/spec/swagger_test.go b/vendor/github.com/go-openapi/spec/swagger_test.go deleted file mode 100644 index f7b3d9022d..0000000000 --- a/vendor/github.com/go-openapi/spec/swagger_test.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
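The type-switch cleanup in StringOrArray.UnmarshalJSON above keeps the documented contract: a bare JSON string decodes to a one-element slice, an array decodes element-wise, and anything else is rejected with the error shown in the hunk. A quick sketch of that contract in use (canonical import path, as before):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var single spec.StringOrArray
	if err := json.Unmarshal([]byte(`"hello"`), &single); err != nil {
		panic(err)
	}
	fmt.Println(single) // [hello]

	var many spec.StringOrArray
	if err := json.Unmarshal([]byte(`["hello","world"]`), &many); err != nil {
		panic(err)
	}
	fmt.Println(many) // [hello world]

	var bad spec.StringOrArray
	err := json.Unmarshal([]byte(`42`), &bad)
	fmt.Println(err) // only string or array is allowed, not float64
}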
- -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - - "github.com/go-openapi/swag" - "github.com/stretchr/testify/assert" -) - -var spec = Swagger{ - SwaggerProps: SwaggerProps{ - ID: "http://localhost:3849/api-docs", - Swagger: "2.0", - Consumes: []string{"application/json", "application/x-yaml"}, - Produces: []string{"application/json"}, - Schemes: []string{"http", "https"}, - Info: &info, - Host: "some.api.out.there", - BasePath: "/", - Paths: &paths, - Definitions: map[string]Schema{"Category": {SchemaProps: SchemaProps{Type: []string{"string"}}}}, - Parameters: map[string]Parameter{ - "categoryParam": {ParamProps: ParamProps{Name: "category", In: "query"}, SimpleSchema: SimpleSchema{Type: "string"}}, - }, - Responses: map[string]Response{ - "EmptyAnswer": { - ResponseProps: ResponseProps{ - Description: "no data to return for this operation", - }, - }, - }, - SecurityDefinitions: map[string]*SecurityScheme{ - "internalApiKey": APIKeyAuth("api_key", "header"), - }, - Security: []map[string][]string{ - {"internalApiKey": {}}, - }, - Tags: []Tag{NewTag("pets", "", nil)}, - ExternalDocs: &ExternalDocumentation{"the name", "the url"}, - }, - VendorExtensible: VendorExtensible{map[string]interface{}{ - "x-some-extension": "vendor", - "x-schemes": []interface{}{"unix", "amqp"}, - }}, -} - -var specJSON = `{ - "id": "http://localhost:3849/api-docs", - "consumes": ["application/json", "application/x-yaml"], - "produces": ["application/json"], - "schemes": ["http", "https"], - "swagger": "2.0", - "info": { - "contact": { - "name": "wordnik api team", - "url": "http://developer.wordnik.com" - }, - "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", - "license": { - "name": "Creative Commons 4.0 International", - "url": "http://creativecommons.org/licenses/by/4.0/" - }, - "termsOfService": "http://helloreverb.com/terms/", - "title": "Swagger Sample API", - "version": "1.0.9-abcd", - "x-framework": "go-swagger" - }, - "host": "some.api.out.there", - "basePath": "/", - "paths": {"x-framework":"go-swagger","/":{"$ref":"cats"}}, - "definitions": { "Category": { "type": "string"} }, - "parameters": { - "categoryParam": { - "name": "category", - "in": "query", - "type": "string" - } - }, - "responses": { "EmptyAnswer": { "description": "no data to return for this operation" } }, - "securityDefinitions": { - "internalApiKey": { - "type": "apiKey", - "in": "header", - "name": "api_key" - } - }, - "security": [{"internalApiKey":[]}], - "tags": [{"name":"pets"}], - "externalDocs": {"description":"the name","url":"the url"}, - "x-some-extension": "vendor", - "x-schemes": ["unix","amqp"] -}` - -// -// func verifySpecSerialize(specJSON []byte, spec Swagger) { -// expected := map[string]interface{}{} -// json.Unmarshal(specJSON, &expected) -// b, err := json.MarshalIndent(spec, "", " ") -// So(err, ShouldBeNil) -// var actual map[string]interface{} -// err = json.Unmarshal(b, &actual) -// So(err, ShouldBeNil) -// compareSpecMaps(actual, expected) -// } - -func assertEquivalent(t testing.TB, actual, expected interface{}) bool { - if actual == nil || expected == nil || reflect.DeepEqual(actual, expected) { - return true - } - - actualType := reflect.TypeOf(actual) - expectedType := reflect.TypeOf(expected) - if reflect.TypeOf(actual).ConvertibleTo(expectedType) { - expectedValue := reflect.ValueOf(expected) - if swag.IsZero(expectedValue) && swag.IsZero(reflect.ValueOf(actual)) { - return true - } - 
- // Attempt comparison after type conversion - if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) { - return true - } - } - - // Last ditch effort - if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) { - return true - } - errFmt := "Expected: '%T(%#v)'\nActual: '%T(%#v)'\n(Should be equivalent)!" - return assert.Fail(t, errFmt, expected, expected, actual, actual) -} - -func ShouldBeEquivalentTo(actual interface{}, expecteds ...interface{}) string { - expected := expecteds[0] - if actual == nil || expected == nil { - return "" - } - - if reflect.DeepEqual(expected, actual) { - return "" - } - - actualType := reflect.TypeOf(actual) - expectedType := reflect.TypeOf(expected) - if reflect.TypeOf(actual).ConvertibleTo(expectedType) { - expectedValue := reflect.ValueOf(expected) - if swag.IsZero(expectedValue) && swag.IsZero(reflect.ValueOf(actual)) { - return "" - } - - // Attempt comparison after type conversion - if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) { - return "" - } - } - - // Last ditch effort - if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) { - return "" - } - errFmt := "Expected: '%T(%#v)'\nActual: '%T(%#v)'\n(Should be equivalent)!" - return fmt.Sprintf(errFmt, expected, expected, actual, actual) - -} - -func assertSpecMaps(t testing.TB, actual, expected map[string]interface{}) bool { - res := true - if id, ok := expected["id"]; ok { - res = assert.Equal(t, id, actual["id"]) - } - res = res && assert.Equal(t, expected["consumes"], actual["consumes"]) - res = res && assert.Equal(t, expected["produces"], actual["produces"]) - res = res && assert.Equal(t, expected["schemes"], actual["schemes"]) - res = res && assert.Equal(t, expected["swagger"], actual["swagger"]) - res = res && assert.Equal(t, expected["info"], actual["info"]) - res = res && assert.Equal(t, expected["host"], actual["host"]) - res = res && assert.Equal(t, expected["basePath"], actual["basePath"]) - res = res && assert.Equal(t, expected["paths"], actual["paths"]) - res = res && assert.Equal(t, expected["definitions"], actual["definitions"]) - res = res && assert.Equal(t, expected["responses"], actual["responses"]) - res = res && assert.Equal(t, expected["securityDefinitions"], actual["securityDefinitions"]) - res = res && assert.Equal(t, expected["tags"], actual["tags"]) - res = res && assert.Equal(t, expected["externalDocs"], actual["externalDocs"]) - res = res && assert.Equal(t, expected["x-some-extension"], actual["x-some-extension"]) - res = res && assert.Equal(t, expected["x-schemes"], actual["x-schemes"]) - - return res -} - -// -// func compareSpecMaps(actual, expected map[string]interface{}) { -// if id, ok := expected["id"]; ok { -// So(actual["id"], ShouldEqual, id) -// } -// //So(actual["$schema"], ShouldEqual, SwaggerSchemaURL) -// So(actual["consumes"], ShouldResemble, expected["consumes"]) -// So(actual["produces"], ShouldResemble, expected["produces"]) -// So(actual["schemes"], ShouldResemble, expected["schemes"]) -// So(actual["swagger"], ShouldEqual, expected["swagger"]) -// So(actual["info"], ShouldResemble, expected["info"]) -// So(actual["host"], ShouldEqual, expected["host"]) -// So(actual["basePath"], ShouldEqual, expected["basePath"]) -// So(actual["paths"], ShouldBeEquivalentTo, expected["paths"]) -// So(actual["definitions"], ShouldBeEquivalentTo, expected["definitions"]) -// So(actual["responses"], ShouldBeEquivalentTo, expected["responses"]) -// So(actual["securityDefinitions"], ShouldResemble, 
expected["securityDefinitions"]) -// So(actual["tags"], ShouldResemble, expected["tags"]) -// So(actual["externalDocs"], ShouldResemble, expected["externalDocs"]) -// So(actual["x-some-extension"], ShouldResemble, expected["x-some-extension"]) -// So(actual["x-schemes"], ShouldResemble, expected["x-schemes"]) -// } - -func assertSpecs(t testing.TB, actual, expected Swagger) bool { - expected.Swagger = "2.0" - return assert.Equal(t, actual, expected) -} - -// -// func compareSpecs(actual Swagger, spec Swagger) { -// spec.Swagger = "2.0" -// So(actual, ShouldBeEquivalentTo, spec) -// } - -func assertSpecJSON(t testing.TB, specJSON []byte) bool { - var expected map[string]interface{} - if !assert.NoError(t, json.Unmarshal(specJSON, &expected)) { - return false - } - - obj := Swagger{} - if !assert.NoError(t, json.Unmarshal(specJSON, &obj)) { - return false - } - - cb, err := json.MarshalIndent(obj, "", " ") - if assert.NoError(t, err) { - return false - } - var actual map[string]interface{} - if !assert.NoError(t, json.Unmarshal(cb, &actual)) { - return false - } - return assertSpecMaps(t, actual, expected) -} - -// func verifySpecJSON(specJSON []byte) { -// //Println() -// //Println("json to verify", string(specJson)) -// var expected map[string]interface{} -// err := json.Unmarshal(specJSON, &expected) -// So(err, ShouldBeNil) -// -// obj := Swagger{} -// err = json.Unmarshal(specJSON, &obj) -// So(err, ShouldBeNil) -// -// //spew.Dump(obj) -// -// cb, err := json.MarshalIndent(obj, "", " ") -// So(err, ShouldBeNil) -// //Println() -// //Println("Marshalling to json returned", string(cb)) -// -// var actual map[string]interface{} -// err = json.Unmarshal(cb, &actual) -// So(err, ShouldBeNil) -// //Println() -// //spew.Dump(expected) -// //spew.Dump(actual) -// //fmt.Printf("comparing %s\n\t%#v\nto\n\t%#+v\n", fileName, expected, actual) -// compareSpecMaps(actual, expected) -// } - -func TestSwaggerSpec_Serialize(t *testing.T) { - expected := make(map[string]interface{}) - json.Unmarshal([]byte(specJSON), &expected) - b, err := json.MarshalIndent(spec, "", " ") - if assert.NoError(t, err) { - var actual map[string]interface{} - err := json.Unmarshal(b, &actual) - if assert.NoError(t, err) { - assert.EqualValues(t, actual, expected) - } - } -} - -func TestSwaggerSpec_Deserialize(t *testing.T) { - var actual Swagger - err := json.Unmarshal([]byte(specJSON), &actual) - if assert.NoError(t, err) { - assert.EqualValues(t, actual, spec) - } -} - -func TestVendorExtensionStringSlice(t *testing.T) { - var actual Swagger - err := json.Unmarshal([]byte(specJSON), &actual) - if assert.NoError(t, err) { - schemes, ok := actual.Extensions.GetStringSlice("x-schemes") - if assert.True(t, ok) { - assert.EqualValues(t, []string{"unix", "amqp"}, schemes) - } - } -} - -func TestOptionalSwaggerProps_Serialize(t *testing.T) { - minimalJsonSpec := []byte(`{ - "swagger": "2.0", - "info": { - "version": "0.0.0", - "title": "Simple API" - }, - "paths": { - "/": { - "get": { - "responses": { - "200": { - "description": "OK" - } - } - } - } - } -}`) - - var minimalSpec Swagger - err := json.Unmarshal(minimalJsonSpec, &minimalSpec) - if assert.NoError(t, err) { - bytes, err := json.Marshal(&minimalSpec) - if assert.NoError(t, err) { - var ms map[string]interface{} - if err := json.Unmarshal(bytes, &ms); assert.NoError(t, err) { - assert.NotContains(t, ms, "consumes") - assert.NotContains(t, ms, "produces") - assert.NotContains(t, ms, "schemes") - assert.NotContains(t, ms, "host") - assert.NotContains(t, ms, 
"basePath") - assert.NotContains(t, ms, "definitions") - assert.NotContains(t, ms, "parameters") - assert.NotContains(t, ms, "responses") - assert.NotContains(t, ms, "securityDefinitions") - assert.NotContains(t, ms, "security") - assert.NotContains(t, ms, "tags") - assert.NotContains(t, ms, "externalDocs") - } - } - } -} diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index 97f555840c..faa3d3de1e 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -21,6 +21,7 @@ import ( "github.com/go-openapi/swag" ) +// TagProps describe a tag entry in the top level tags section of a swagger spec type TagProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` @@ -29,10 +30,11 @@ type TagProps struct { // NewTag creates a new tag func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). // It is not mandatory to have a Tag Object per tag used there. // // For more information: http://goo.gl/8us55a#tagObject diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 0000000000..aa12b56f6e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. 
+ + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/github.com/go-openapi/spec/xml_object_test.go b/vendor/github.com/go-openapi/spec/xml_object_test.go deleted file mode 100644 index fda3b10841..0000000000 --- a/vendor/github.com/go-openapi/spec/xml_object_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestXmlObject_Serialize(t *testing.T) { - obj1 := XMLObject{} - actual, err := json.Marshal(obj1) - if assert.NoError(t, err) { - assert.Equal(t, "{}", string(actual)) - } - - obj2 := XMLObject{ - Name: "the name", - Namespace: "the namespace", - Prefix: "the prefix", - Attribute: true, - Wrapped: true, - } - - actual, err = json.Marshal(obj2) - if assert.NoError(t, err) { - var ad map[string]interface{} - if assert.NoError(t, json.Unmarshal(actual, &ad)) { - assert.Equal(t, obj2.Name, ad["name"]) - assert.Equal(t, obj2.Namespace, ad["namespace"]) - assert.Equal(t, obj2.Prefix, ad["prefix"]) - assert.True(t, ad["attribute"].(bool)) - assert.True(t, ad["wrapped"].(bool)) - } - } -} - -func TestXmlObject_Deserialize(t *testing.T) { - expected := XMLObject{} - actual := XMLObject{} - if assert.NoError(t, json.Unmarshal([]byte("{}"), &actual)) { - assert.Equal(t, expected, actual) - } - - completed := `{"name":"the name","namespace":"the namespace","prefix":"the prefix","attribute":true,"wrapped":true}` - expected = XMLObject{"the name", "the namespace", "the prefix", true, true} - actual = XMLObject{} - if assert.NoError(t, json.Unmarshal([]byte(completed), &actual)) { - assert.Equal(t, expected, actual) - } -} diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 0000000000..6b237e4a79 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,20 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 3 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - lll diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml index 24c69bdf36..bd3a2e527d 100644 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,14 +1,16 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.8 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/mailru/easyjson - go get -u gopkg.in/yaml.v2 -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
-after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 5d43728e87..459a3e18d7 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,12 +1,23 @@ # Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) -Contains a bunch of helper functions: +Contains a bunch of helper functions for go-openapi and go-swagger projects. -* convert between value and pointers for builtins -* convert from string to builtin +You may also use it standalone for your projects. 
+ +* convert between value and pointers for builtin types +* convert from string to builtin types (wraps strconv) * fast json concatenation * search in path * load from file or http -* name manglin \ No newline at end of file +* name mangling + + +This repo has only few dependencies outside of the standard library: + +* JSON utilities depend on github.com/mailru/easyjson +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go index 2bf5ecbba2..4e446ff703 100644 --- a/vendor/github.com/go-openapi/swag/convert.go +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -22,8 +22,9 @@ import ( // same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 ) // IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive @@ -31,21 +32,39 @@ func IsFloat64AJSONInteger(f float64) bool { if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { return false } - - return f == float64(int64(f)) || f == float64(uint64(f)) -} - -var evaluatesAsTrue = map[string]struct{}{ - "true": struct{}{}, - "1": struct{}{}, - "yes": struct{}{}, - "ok": struct{}{}, - "y": struct{}{}, - "on": struct{}{}, - "selected": struct{}{}, - "checked": struct{}{}, - "t": struct{}{}, - "enabled": struct{}{}, + fa := math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) + + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + if f == g { // best case + return true + } else if f == float64(int64(f)) || f == float64(uint64(f)) { // optimistic case + return true + } else if f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64 { // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon +} + +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } } // ConvertBool turn a string into a boolean diff --git a/vendor/github.com/go-openapi/swag/convert_test.go b/vendor/github.com/go-openapi/swag/convert_test.go deleted file mode 100644 index 2f00732366..0000000000 --- a/vendor/github.com/go-openapi/swag/convert_test.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
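The rewritten IsFloat64AJSONInteger in convert.go above no longer trusts a bare int64/uint64 round-trip: after the exact-equality fast paths it falls back to the relative-error comparison recommended by the floating-point guide it cites, with epsilon = 1e-9 and a special case for values at or near zero. The comparison in isolation, reduced to two arbitrary operands but using the same constants and branch structure:

package main

import (
	"fmt"
	"math"
)

const epsilon = 1e-9

// nearlyEqual is the relative-error comparison the new code falls
// back to: exact match first, a guard for operands at or near zero,
// then the difference scaled by the combined magnitude.
func nearlyEqual(a, b float64) bool {
	diff := math.Abs(a - b)
	switch {
	case a == b: // best case: exactly equal
		return true
	case a == 0 || b == 0 || diff < math.SmallestNonzeroFloat64:
		// very close to zero: relative error is meaningless there
		return diff < epsilon*math.SmallestNonzeroFloat64
	default:
		// relative error against the summed magnitudes, clamped to MaxFloat64
		return diff/math.Min(math.Abs(a)+math.Abs(b), math.MaxFloat64) < epsilon
	}
}

func main() {
	fmt.Println(nearlyEqual(0.1+0.2, 0.3))  // true: differs only by rounding noise
	fmt.Println(nearlyEqual(1.0, 1.000001)) // false: relative error ~5e-7 > epsilon
}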
- -package swag - -import ( - "math" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -// These are really dumb tests - -func TestConvertBool(t *testing.T) { - for k := range evaluatesAsTrue { - r, err := ConvertBool(k) - if assert.NoError(t, err) { - assert.True(t, r) - } - } - for _, k := range []string{"a", "", "0", "false", "unchecked"} { - r, err := ConvertBool(k) - if assert.NoError(t, err) { - assert.False(t, r) - } - } -} - -func TestConvertFloat32(t *testing.T) { - validFloats := []float32{1.0, -1, math.MaxFloat32, math.SmallestNonzeroFloat32, 0, 5.494430303} - invalidFloats := []string{"a", strconv.FormatFloat(math.MaxFloat64, 'f', -1, 64), "true"} - - for _, f := range validFloats { - c, err := ConvertFloat32(FormatFloat32(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidFloats { - _, err := ConvertFloat32(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertFloat64(t *testing.T) { - validFloats := []float64{1.0, -1, float64(math.MaxFloat32), float64(math.SmallestNonzeroFloat32), math.MaxFloat64, math.SmallestNonzeroFloat64, 0, 5.494430303} - invalidFloats := []string{"a", "true"} - - for _, f := range validFloats { - c, err := ConvertFloat64(FormatFloat64(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidFloats { - _, err := ConvertFloat64(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertInt8(t *testing.T) { - validInts := []int8{0, 1, -1, math.MaxInt8, math.MinInt8} - invalidInts := []string{"1.233", "a", "false", strconv.Itoa(int(math.MaxInt64))} - - for _, f := range validInts { - c, err := ConvertInt8(FormatInt8(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidInts { - _, err := ConvertInt8(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertInt16(t *testing.T) { - validInts := []int16{0, 1, -1, math.MaxInt8, math.MinInt8, math.MaxInt16, math.MinInt16} - invalidInts := []string{"1.233", "a", "false", strconv.Itoa(int(math.MaxInt64))} - - for _, f := range validInts { - c, err := ConvertInt16(FormatInt16(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidInts { - _, err := ConvertInt16(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertInt32(t *testing.T) { - validInts := []int32{0, 1, -1, math.MaxInt8, math.MinInt8, math.MaxInt16, math.MinInt16, math.MinInt32, math.MaxInt32} - invalidInts := []string{"1.233", "a", "false", strconv.Itoa(int(math.MaxInt64))} - - for _, f := range validInts { - c, err := ConvertInt32(FormatInt32(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidInts { - _, err := ConvertInt32(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertInt64(t *testing.T) { - validInts := []int64{0, 1, -1, math.MaxInt8, math.MinInt8, math.MaxInt16, math.MinInt16, math.MinInt32, math.MaxInt32, math.MaxInt64, math.MinInt64} - invalidInts := []string{"1.233", "a", "false"} - - for _, f := range validInts { - c, err := ConvertInt64(FormatInt64(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidInts { - _, err := ConvertInt64(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertUint8(t *testing.T) { - validInts := 
[]uint8{0, 1, math.MaxUint8} - invalidInts := []string{"1.233", "a", "false", strconv.FormatUint(math.MaxUint64, 10)} - - for _, f := range validInts { - c, err := ConvertUint8(FormatUint8(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidInts { - _, err := ConvertUint8(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertUint16(t *testing.T) { - validUints := []uint16{0, 1, math.MaxUint8, math.MaxUint16} - invalidUints := []string{"1.233", "a", "false", strconv.FormatUint(math.MaxUint64, 10)} - - for _, f := range validUints { - c, err := ConvertUint16(FormatUint16(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidUints { - _, err := ConvertUint16(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertUint32(t *testing.T) { - validUints := []uint32{0, 1, math.MaxUint8, math.MaxUint16, math.MaxUint32} - invalidUints := []string{"1.233", "a", "false", strconv.FormatUint(math.MaxUint64, 10)} - - for _, f := range validUints { - c, err := ConvertUint32(FormatUint32(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidUints { - _, err := ConvertUint32(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestConvertUint64(t *testing.T) { - validUints := []uint64{0, 1, math.MaxUint8, math.MaxUint16, math.MaxUint32, math.MaxUint64} - invalidUints := []string{"1.233", "a", "false"} - - for _, f := range validUints { - c, err := ConvertUint64(FormatUint64(f)) - if assert.NoError(t, err) { - assert.EqualValues(t, f, c) - } - } - for _, f := range invalidUints { - _, err := ConvertUint64(f) - assert.Error(t, err, "expected '"+f+"' to generate an error") - } -} - -func TestIsFloat64AJSONInteger(t *testing.T) { - assert.False(t, IsFloat64AJSONInteger(math.Inf(1))) - assert.False(t, IsFloat64AJSONInteger(maxJSONFloat+1)) - - assert.False(t, IsFloat64AJSONInteger(minJSONFloat-1)) - assert.True(t, IsFloat64AJSONInteger(1.0)) - assert.True(t, IsFloat64AJSONInteger(maxJSONFloat)) - assert.True(t, IsFloat64AJSONInteger(minJSONFloat)) -} - -func TestFormatBool(t *testing.T) { - assert.Equal(t, "true", FormatBool(true)) - assert.Equal(t, "false", FormatBool(false)) -} diff --git a/vendor/github.com/go-openapi/swag/convert_types_test.go b/vendor/github.com/go-openapi/swag/convert_types_test.go deleted file mode 100644 index 978cf3a1d3..0000000000 --- a/vendor/github.com/go-openapi/swag/convert_types_test.go +++ /dev/null @@ -1,579 +0,0 @@ -package swag - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var testCasesStringSlice = [][]string{ - {"a", "b", "c", "d", "e"}, - {"a", "b", "", "", "e"}, -} - -func TestStringSlice(t *testing.T) { - for idx, in := range testCasesStringSlice { - if in == nil { - continue - } - out := StringSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := StringValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesStringValueSlice = [][]*string{ - {String("a"), String("b"), nil, String("c")}, -} - -func TestStringValueSlice(t *testing.T) { - for idx, in := range testCasesStringValueSlice { - if in == nil { - continue - } - out := 
StringValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := StringSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesStringMap = []map[string]string{ - {"a": "1", "b": "2", "c": "3"}, -} - -func TestStringMap(t *testing.T) { - for idx, in := range testCasesStringMap { - if in == nil { - continue - } - out := StringMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := StringValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesBoolSlice = [][]bool{ - {true, true, false, false}, -} - -func TestBoolSlice(t *testing.T) { - for idx, in := range testCasesBoolSlice { - if in == nil { - continue - } - out := BoolSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := BoolValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesBoolValueSlice = [][]*bool{} - -func TestBoolValueSlice(t *testing.T) { - for idx, in := range testCasesBoolValueSlice { - if in == nil { - continue - } - out := BoolValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := BoolSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesBoolMap = []map[string]bool{ - {"a": true, "b": false, "c": true}, -} - -func TestBoolMap(t *testing.T) { - for idx, in := range testCasesBoolMap { - if in == nil { - continue - } - out := BoolMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := BoolValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesIntSlice = [][]int{ - {1, 2, 3, 4}, -} - -func TestIntSlice(t *testing.T) { - for idx, in := range testCasesIntSlice { - if in == nil { - continue - } - out := IntSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := IntValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesIntValueSlice = [][]*int{} - -func TestIntValueSlice(t *testing.T) { - 
for idx, in := range testCasesIntValueSlice { - if in == nil { - continue - } - out := IntValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := IntSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesIntMap = []map[string]int{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestIntMap(t *testing.T) { - for idx, in := range testCasesIntMap { - if in == nil { - continue - } - out := IntMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := IntValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesInt64Slice = [][]int64{ - {1, 2, 3, 4}, -} - -func TestInt64Slice(t *testing.T) { - for idx, in := range testCasesInt64Slice { - if in == nil { - continue - } - out := Int64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Int64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesInt64ValueSlice = [][]*int64{} - -func TestInt64ValueSlice(t *testing.T) { - for idx, in := range testCasesInt64ValueSlice { - if in == nil { - continue - } - out := Int64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := Int64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesInt64Map = []map[string]int64{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestInt64Map(t *testing.T) { - for idx, in := range testCasesInt64Map { - if in == nil { - continue - } - out := Int64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Int64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesFloat64Slice = [][]float64{ - {1, 2, 3, 4}, -} - -func TestFloat64Slice(t *testing.T) { - for idx, in := range testCasesFloat64Slice { - if in == nil { - continue - } - out := Float64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Float64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var 
testCasesUintSlice = [][]uint{ - {1, 2, 3, 4}, -} - -func TestUintSlice(t *testing.T) { - for idx, in := range testCasesUintSlice { - if in == nil { - continue - } - out := UintSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := UintValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesUintValueSlice = [][]*uint{} - -func TestUintValueSlice(t *testing.T) { - for idx, in := range testCasesUintValueSlice { - if in == nil { - continue - } - out := UintValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := UintSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesUintMap = []map[string]uint{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestUintMap(t *testing.T) { - for idx, in := range testCasesUintMap { - if in == nil { - continue - } - out := UintMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := UintValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesUint64Slice = [][]uint64{ - {1, 2, 3, 4}, -} - -func TestUint64Slice(t *testing.T) { - for idx, in := range testCasesUint64Slice { - if in == nil { - continue - } - out := Uint64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Uint64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesUint64ValueSlice = [][]*uint64{} - -func TestUint64ValueSlice(t *testing.T) { - for idx, in := range testCasesUint64ValueSlice { - if in == nil { - continue - } - out := Uint64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := Uint64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesUint64Map = []map[string]uint64{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestUint64Map(t *testing.T) { - for idx, in := range testCasesUint64Map { - if in == nil { - continue - } - out := Uint64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Uint64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx 
%d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesFloat64ValueSlice = [][]*float64{} - -func TestFloat64ValueSlice(t *testing.T) { - for idx, in := range testCasesFloat64ValueSlice { - if in == nil { - continue - } - out := Float64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := Float64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesFloat64Map = []map[string]float64{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestFloat64Map(t *testing.T) { - for idx, in := range testCasesFloat64Map { - if in == nil { - continue - } - out := Float64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := Float64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesTimeSlice = [][]time.Time{ - {time.Now(), time.Now().AddDate(100, 0, 0)}, -} - -func TestTimeSlice(t *testing.T) { - for idx, in := range testCasesTimeSlice { - if in == nil { - continue - } - out := TimeSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := TimeValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesTimeValueSlice = [][]*time.Time{} - -func TestTimeValueSlice(t *testing.T) { - for idx, in := range testCasesTimeValueSlice { - if in == nil { - continue - } - out := TimeValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := TimeSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesTimeMap = []map[string]time.Time{ - {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, -} - -func TestTimeMap(t *testing.T) { - for idx, in := range testCasesTimeMap { - if in == nil { - continue - } - out := TimeMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := TimeValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 0000000000..e01e1a0234 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger 
maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + + * convert between value and pointers for builtin types + * convert from string to builtin types (wraps strconv) + * fast json concatenation + * search in path + * load from file or http + * name mangling + + +This repo has only few dependencies outside of the standard library: + + * JSON utilities depend on github.com/mailru/easyjson + * YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod new file mode 100644 index 0000000000..9eb936a19b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/swag + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum new file mode 100644 index 0000000000..d6e717bd4e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index cb20a6a0f7..33da5e4e7d 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -26,14 +26,21 @@ import ( "github.com/mailru/easyjson/jwriter" ) +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + // DefaultJSONNameProvider the default cache for types var DefaultJSONNameProvider = NewNameProvider() const comma = byte(',') -var closers = map[byte]byte{ - '{': '}', - '[': ']', +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': 
'}', + '[': ']', + } } type ejMarshaler interface { @@ -79,10 +86,7 @@ func DynamicJSONToStruct(data interface{}, target interface{}) error { if err != nil { return err } - if err := ReadJSON(b, target); err != nil { - return err - } - return nil + return ReadJSON(b, target) } // ConcatJSON concatenates multiple json objects efficiently @@ -90,17 +94,29 @@ func ConcatJSON(blobs ...[]byte) []byte { if len(blobs) == 0 { return nil } - if len(blobs) == 1 { + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last = last - 1 + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { return blobs[0] } - last := len(blobs) - 1 var opening, closing byte - a := 0 - idx := 0 + var idx, a int buf := bytes.NewBuffer(nil) - for i, b := range blobs { + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } if len(b) > 0 && opening == 0 { // is this an array or an object? opening, closing = b[0], closers[b[0]] } @@ -245,7 +261,7 @@ func (n *NameProvider) GetJSONNames(subject interface{}) []string { names = n.makeNameIndex(tpe) } - var res []string + res := make([]string, 0, len(names.jsonNames)) for k := range names.jsonNames { res = append(res, k) } diff --git a/vendor/github.com/go-openapi/swag/json_test.go b/vendor/github.com/go-openapi/swag/json_test.go deleted file mode 100644 index a7d0d9821b..0000000000 --- a/vendor/github.com/go-openapi/swag/json_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
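// The ConcatJSON hunk above changes how empty inputs are handled: trailing
// nil or literal-"null" blobs are stripped before the single-blob fast path,
// and interior nulls are skipped during concatenation, so callers no longer
// need to filter them out themselves. A minimal sketch of the resulting
// behavior, assuming the go-openapi/swag package as patched here:
//
//	a := swag.ConcatJSON([]byte(`{"id":1}`), []byte("null"), []byte(`{"age":32}`))
//	// a == []byte(`{"id":1,"age":32}`) -- the null blob is skipped
//	b := swag.ConcatJSON(nil, []byte("null"))
//	// b == nil -- a list of nothing but nulls concatenates to nothing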
- -package swag - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -type testNameStruct struct { - Name string `json:"name"` - NotTheSame int64 `json:"plain"` - Ignored string `json:"-"` -} - -func TestNameProvider(t *testing.T) { - - provider := NewNameProvider() - - var obj = testNameStruct{} - - nm, ok := provider.GetGoName(obj, "name") - assert.True(t, ok) - assert.Equal(t, "Name", nm) - - nm, ok = provider.GetGoName(obj, "plain") - assert.True(t, ok) - assert.Equal(t, "NotTheSame", nm) - - nm, ok = provider.GetGoName(obj, "doesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetGoName(obj, "ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - tpe := reflect.TypeOf(obj) - nm, ok = provider.GetGoNameForType(tpe, "name") - assert.True(t, ok) - assert.Equal(t, "Name", nm) - - nm, ok = provider.GetGoNameForType(tpe, "plain") - assert.True(t, ok) - assert.Equal(t, "NotTheSame", nm) - - nm, ok = provider.GetGoNameForType(tpe, "doesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetGoNameForType(tpe, "ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - ptr := &obj - nm, ok = provider.GetGoName(ptr, "name") - assert.True(t, ok) - assert.Equal(t, "Name", nm) - - nm, ok = provider.GetGoName(ptr, "plain") - assert.True(t, ok) - assert.Equal(t, "NotTheSame", nm) - - nm, ok = provider.GetGoName(ptr, "doesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetGoName(ptr, "ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONName(obj, "Name") - assert.True(t, ok) - assert.Equal(t, "name", nm) - - nm, ok = provider.GetJSONName(obj, "NotTheSame") - assert.True(t, ok) - assert.Equal(t, "plain", nm) - - nm, ok = provider.GetJSONName(obj, "DoesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONName(obj, "Ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONNameForType(tpe, "Name") - assert.True(t, ok) - assert.Equal(t, "name", nm) - - nm, ok = provider.GetJSONNameForType(tpe, "NotTheSame") - assert.True(t, ok) - assert.Equal(t, "plain", nm) - - nm, ok = provider.GetJSONNameForType(tpe, "doesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONNameForType(tpe, "Ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONName(ptr, "Name") - assert.True(t, ok) - assert.Equal(t, "name", nm) - - nm, ok = provider.GetJSONName(ptr, "NotTheSame") - assert.True(t, ok) - assert.Equal(t, "plain", nm) - - nm, ok = provider.GetJSONName(ptr, "doesNotExist") - assert.False(t, ok) - assert.Empty(t, nm) - - nm, ok = provider.GetJSONName(ptr, "Ignored") - assert.False(t, ok) - assert.Empty(t, nm) - - nms := provider.GetJSONNames(ptr) - assert.Len(t, nms, 2) - - assert.Len(t, provider.index, 1) - -} - -func TestJSONConcatenation(t *testing.T) { - assert.Nil(t, ConcatJSON()) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`)), []byte(`{"id":1}`)) - assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{}`)), []byte(`{}`)) - assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[]`)), []byte(`[]`)) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`)), []byte(`{"id":1,"name":"Rachel"}`)) - assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`)), []byte(`[{"id":1},{"name":"Rachel"}]`)) - assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{"name":"Rachel"}`)), []byte(`{"name":"Rachel"}`)) - assert.Equal(t, 
ConcatJSON([]byte(`[]`), []byte(`[{"name":"Rachel"}]`)), []byte(`[{"name":"Rachel"}]`)) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{}`)), []byte(`{"id":1}`)) - assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[]`)), []byte(`[{"id":1}]`)) - assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{}`), []byte(`{}`)), []byte(`{}`)) - assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[]`), []byte(`[]`)), []byte(`[]`)) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`), []byte(`{"age":32}`)), []byte(`{"id":1,"name":"Rachel","age":32}`)) - assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`), []byte(`[{"age":32}]`)), []byte(`[{"id":1},{"name":"Rachel"},{"age":32}]`)) - assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{"name":"Rachel"}`), []byte(`{"age":32}`)), []byte(`{"name":"Rachel","age":32}`)) - assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[{"name":"Rachel"}]`), []byte(`[{"age":32}]`)), []byte(`[{"name":"Rachel"},{"age":32}]`)) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{}`), []byte(`{"age":32}`)), []byte(`{"id":1,"age":32}`)) - assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[]`), []byte(`[{"age":32}]`)), []byte(`[{"id":1},{"age":32}]`)) - assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`), []byte(`{}`)), []byte(`{"id":1,"name":"Rachel"}`)) - assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`), []byte(`[]`)), []byte(`[{"id":1},{"name":"Rachel"}]`)) - -} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 62ed1e80ab..70f4fb361c 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -43,7 +43,13 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( if strings.HasPrefix(path, "http") { return remote } - return func(pth string) ([]byte, error) { return local(filepath.FromSlash(pth)) } + return func(pth string) ([]byte, error) { + upth, err := pathUnescape(pth) + if err != nil { + return nil, err + } + return local(filepath.FromSlash(upth)) + } } func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { diff --git a/vendor/github.com/go-openapi/swag/loading_test.go b/vendor/github.com/go-openapi/swag/loading_test.go deleted file mode 100644 index 7b8bdf48db..0000000000 --- a/vendor/github.com/go-openapi/swag/loading_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
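// The LoadStrategy hunk above now percent-unescapes local paths before they
// reach the local loader, so a reference such as "My%20Specs/spec.json"
// resolves to the on-disk "My Specs/spec.json". pathUnescape itself comes
// from the build-tagged files added later in this diff (url.PathUnescape on
// go1.8+, url.QueryUnescape before). A hedged usage sketch:
//
//	load := swag.LoadStrategy("./spec.json", ioutil.ReadFile, swag.LoadFromFileOrHTTP)
//	doc, err := load("My%20Specs/spec.json") // reads "My Specs/spec.json" locally
//
// Paths beginning with "http" still dispatch to the remote loader unchanged.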
- -package swag - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestLoadFromHTTP(t *testing.T) { - - _, err := LoadFromFileOrHTTP("httx://12394:abd") - assert.Error(t, err) - - serv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - })) - defer serv.Close() - - _, err = LoadFromFileOrHTTP(serv.URL) - assert.Error(t, err) - - ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte("the content")) - })) - defer ts2.Close() - - d, err := LoadFromFileOrHTTP(ts2.URL) - assert.NoError(t, err) - assert.Equal(t, []byte("the content"), d) -} diff --git a/vendor/github.com/go-openapi/swag/net_test.go b/vendor/github.com/go-openapi/swag/net_test.go deleted file mode 100644 index 041db60a23..0000000000 --- a/vendor/github.com/go-openapi/swag/net_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package swag - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSplitHostPort(t *testing.T) { - data := []struct { - Input string - Host string - Port int - Err bool - }{ - {"localhost:3933", "localhost", 3933, false}, - {"localhost:yellow", "", -1, true}, - {"localhost", "", -1, true}, - {"localhost:", "", -1, true}, - {"localhost:3933", "localhost", 3933, false}, - } - - for _, e := range data { - h, p, err := SplitHostPort(e.Input) - if (!e.Err && assert.NoError(t, err)) || (e.Err && assert.Error(t, err)) { - assert.Equal(t, e.Host, h) - assert.Equal(t, e.Port, p) - } - } -} diff --git a/vendor/github.com/go-openapi/swag/path_test.go b/vendor/github.com/go-openapi/swag/path_test.go deleted file mode 100644 index c5a6706467..0000000000 --- a/vendor/github.com/go-openapi/swag/path_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
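// The tests removed below covered swag's GOPATH-style package lookup:
// FindInSearchPath walks a filepath.ListSeparator-separated list of roots,
// checks "<root>/src/<pkg>" under each, and follows symlinks to the real
// package directory. Rough behavior, unchanged by this diff:
//
//	roots := "/go1" + string(filepath.ListSeparator) + "/go2"
//	pkg := swag.FindInSearchPath(roots, "foo/bar")
//	// first existing /go1/src/foo/bar or /go2/src/foo/bar, "" when neither exists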
- -package swag - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -func makeDirStructure(t *testing.T, tgt string) (string, string, error) { - if tgt == "" { - tgt = "pkgpaths" - } - td, err := ioutil.TempDir("", tgt) - if err != nil { - return "", "", err - } - td2, err := ioutil.TempDir("", tgt+"-2") - if err != nil { - return "", "", err - } - realPath := filepath.Join(td, "src", "foo", "bar") - if err := os.MkdirAll(realPath, os.ModePerm); err != nil { - return "", "", err - } - linkPathBase := filepath.Join(td, "src", "baz") - if err := os.MkdirAll(linkPathBase, os.ModePerm); err != nil { - return "", "", err - } - linkPath := filepath.Join(linkPathBase, "das") - if err := os.Symlink(realPath, linkPath); err != nil { - return "", "", err - } - - realPath = filepath.Join(td2, "src", "fuu", "bir") - if err := os.MkdirAll(realPath, os.ModePerm); err != nil { - return "", "", err - } - linkPathBase = filepath.Join(td2, "src", "biz") - if err := os.MkdirAll(linkPathBase, os.ModePerm); err != nil { - return "", "", err - } - linkPath = filepath.Join(linkPathBase, "dis") - if err := os.Symlink(realPath, linkPath); err != nil { - return "", "", err - } - return td, td2, nil -} - -func TestFindPackage(t *testing.T) { - pth, pth2, err := makeDirStructure(t, "") - if err != nil { - t.Fatal(err) - } - defer func() { - os.RemoveAll(pth) - os.RemoveAll(pth2) - }() - - searchPath := pth + string(filepath.ListSeparator) + pth2 - // finds package when real name mentioned - pkg := FindInSearchPath(searchPath, "foo/bar") - assert.NotEmpty(t, pkg) - assertPath(t, path.Join(pth, "src", "foo", "bar"), pkg) - // finds package when real name is mentioned in secondary - pkg = FindInSearchPath(searchPath, "fuu/bir") - assert.NotEmpty(t, pkg) - assertPath(t, path.Join(pth2, "src", "fuu", "bir"), pkg) - // finds package when symlinked - pkg = FindInSearchPath(searchPath, "baz/das") - assert.NotEmpty(t, pkg) - assertPath(t, path.Join(pth, "src", "foo", "bar"), pkg) - // finds package when symlinked in secondary - pkg = FindInSearchPath(searchPath, "biz/dis") - assert.NotEmpty(t, pkg) - assertPath(t, path.Join(pth2, "src", "fuu", "bir"), pkg) - // return empty string when nothing is found - pkg = FindInSearchPath(searchPath, "not/there") - assert.Empty(t, pkg) -} - -func assertPath(t testing.TB, expected, actual string) bool { - fp, err := filepath.EvalSymlinks(expected) - if assert.NoError(t, err) { - return assert.Equal(t, fp, actual) - } - return true -} - -func TestFullGOPATH(t *testing.T) { - os.Unsetenv(GOPATHKey) - ngp := "/some/where:/other/place" - os.Setenv(GOPATHKey, ngp) - - ogp := os.Getenv(GOPATHKey) - defer os.Setenv(GOPATHKey, ogp) - - expected := ngp + ":" + runtime.GOROOT() - assert.Equal(t, expected, FullGoSearchPath()) -} diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 0000000000..ef48086db3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go new file mode 100644 index 0000000000..567680c793 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -0,0 +1,53 @@ +// +build go1.9 + 
+package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. +type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, value interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byLength(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 0000000000..860bb2bbb1 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go new file mode 100644 index 0000000000..72c48ae752 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -0,0 +1,55 @@ +// +build !go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Before go1.9, this may be implemented with a mutex on the map. 
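// Together with post_go19.go above, this gives two build-tag-selected
// implementations behind one unexported surface (load, add, isInitialism,
// sorted): a sync.Map on go1.9+, a mutex-guarded map before. Callers in
// util.go stay storage-agnostic; for example (a sketch against the API
// defined in these two files, not part of the patch itself):
//
//	idx := newIndexOfInitialisms().load(map[string]bool{"HTTP": true})
//	idx.add("GRPC")
//	_ = idx.isInitialism("HTTP") // true under either implementation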
+type indexOfInitialisms struct { + getMutex *sync.Mutex + index map[string]bool +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + getMutex: new(sync.Mutex), + index: make(map[string]bool, 50), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k, v := range initial { + m.index[k] = v + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + m.getMutex.Lock() + defer m.getMutex.Unlock() + _, ok := m.index[key] + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + m.index[key] = true + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byLength(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 0efb41735d..e659968fb1 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,63 +18,85 @@ import ( "math" "reflect" "regexp" - "sort" "strings" + "sync" "unicode" ) -// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 -var commonInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, -} +// commonInitialisms are common acronyms that are kept as whole uppercased words. 
+var commonInitialisms *indexOfInitialisms + +// initialisms is a slice of sorted initialisms var initialisms []string +var once sync.Once + +var isInitialism func(string) bool + func init() { - for k := range commonInitialisms { - initialisms = append(initialisms, k) + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + var configuredInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, } - sort.Sort(sort.Reverse(byLength(initialisms))) + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func ensureSorted() { + initialisms = commonInitialisms.sorted() } -// JoinByFormat joins a string array by a known format: +const ( + //collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): // ssv: space separated value // tsv: tab separated value // pipes: pipe (|) separated value @@ -85,13 +107,13 @@ func JoinByFormat(data []string, format string) []string { } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return data default: sep = "," @@ -104,19 +126,20 @@ func JoinByFormat(data []string, format string) []string { // tsv: tab separated value // pipes: pipe (|) separated value // csv: comma separated value (default) +// func SplitByFormat(data, format string) []string { if data == "" { return nil } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return nil default: sep = "," @@ -143,7 +166,7 @@ func (s byLength) Less(i, j int) bool { } // Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) (words []string) { +func split(str string) []string { repl := strings.NewReplacer( "@", "At ", "&", "And ", @@ -166,13 +189,13 @@ func split(str string) (words []string) { str = rex1.ReplaceAllString(str, " $1") // check if consecutive single char things make up an initialism + once.Do(ensureSorted) for _, k := range initialisms { str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) } // Get the final list of words - words = rex2.FindAllString(str, -1) - - return + //words = rex2.FindAllString(str, -1) + return rex2.FindAllString(str, -1) } // Removes leading whitespaces @@ -204,42 +227,22 @@ func Camelize(word string) (camelized string) { // ToFileName lowercases and underscores a go type name func 
ToFileName(name string) string { - var out []string - cml := trim(name) - - // Camelize any capital word preceding a reserved keyword ("initialism") - // thus, upper-cased words preceding a common initialism will get separated - // e.g: ELBHTTPLoadBalancer becomes elb_http_load_balancer - rexPrevious := regexp.MustCompile(`(?P\p{Lu}{2,})(?:HTTP|OAI)`) - cml = rexPrevious.ReplaceAllStringFunc(cml, func(match string) (replaceInMatch string) { - for _, m := range rexPrevious.FindAllStringSubmatch(match, -1) { // [ match submatch ] - if m[1] != "" { - replaceInMatch = strings.Replace(m[0], m[1], Camelize(m[1]), -1) - } - } - return - }) - - // Pre-camelize reserved keywords ("initialisms") to avoid unnecessary hyphenization - for _, k := range initialisms { - cml = strings.Replace(cml, k, Camelize(k), -1) - } + in := split(name) + out := make([]string, 0, len(in)) - // Camelize other capital words to avoid unnecessary hyphenization - rexCase := regexp.MustCompile(`(\p{Lu}{2,})`) - cml = rexCase.ReplaceAllStringFunc(cml, Camelize) - - // Final split with hyphens - for _, w := range split(cml) { + for _, w := range in { out = append(out, lower(w)) } + return strings.Join(out, "_") } // ToCommandName lowercases and underscores a go type name func ToCommandName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { out = append(out, lower(w)) } return strings.Join(out, "-") @@ -247,9 +250,11 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - var out []string - for _, w := range split(name) { - if !commonInitialisms[upper(w)] { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + if !isInitialism(upper(w)) { out = append(out, lower(w)) } else { out = append(out, w) @@ -260,10 +265,12 @@ func ToHumanNameLower(name string) string { // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) - if !commonInitialisms[uw] { + if !isInitialism(uw) { out = append(out, upper(w[:1])+lower(w[1:])) } else { out = append(out, w) @@ -274,8 +281,10 @@ func ToHumanNameTitle(name string) string { // ToJSONName camelcases a name which can be underscored or pascal cased func ToJSONName(name string) string { - var out []string - for i, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { if i == 0 { out = append(out, lower(w)) continue @@ -288,7 +297,7 @@ func ToJSONName(name string) string { // ToVarName camelcases a name which can be underscored or pascal cased func ToVarName(name string) string { res := ToGoName(name) - if _, ok := commonInitialisms[res]; ok { + if isInitialism(res) { return lower(res) } if len(res) <= 1 { @@ -299,11 +308,13 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) mod := int(math.Min(float64(len(uw)), 2)) - if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { + if 
!isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) { uw = upper(w[:1]) + lower(w[1:]) } out = append(out, uw) @@ -322,6 +333,16 @@ func ToGoName(name string) string { return result } +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + // ContainsStringsCI searches a slice of strings for a case-insensitive match func ContainsStringsCI(coll []string, item string) bool { for _, a := range coll { @@ -366,6 +387,16 @@ func IsZero(data interface{}) bool { return false } +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + //commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() +} + // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/util_test.go b/vendor/github.com/go-openapi/swag/util_test.go deleted file mode 100644 index 3aeb925fa7..0000000000 --- a/vendor/github.com/go-openapi/swag/util_test.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
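// With commonInitialisms now behind the thread-safe index above (and the
// sorted slice rebuilt lazily via sync.Once), extending the acronym table
// becomes a safe runtime call instead of a mutation of a package-level map.
// A short sketch, assuming the patched swag API:
//
//	swag.AddInitialisms("NGINX")          // stores the word and re-sorts the index
//	_ = swag.ToGoName("nginx_controller") // -> "NGINXController"
//	_ = swag.ToVarName("HTTP")            // a bare initialism lowercases whole: "http"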
- -package swag - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type translationSample struct { - str, out string -} - -func titleize(s string) string { return strings.ToTitle(s[:1]) + lower(s[1:]) } - -func TestToGoName(t *testing.T) { - samples := []translationSample{ - {"sample text", "SampleText"}, - {"sample-text", "SampleText"}, - {"sample_text", "SampleText"}, - {"sampleText", "SampleText"}, - {"sample 2 Text", "Sample2Text"}, - {"findThingById", "FindThingByID"}, - {"日本語sample 2 Text", "X日本語sample2Text"}, - {"日本語findThingById", "X日本語findThingByID"}, - {"findTHINGSbyID", "FindTHINGSbyID"}, - } - - for k := range commonInitialisms { - samples = append(samples, - translationSample{"sample " + lower(k) + " text", "Sample" + k + "Text"}, - translationSample{"sample-" + lower(k) + "-text", "Sample" + k + "Text"}, - translationSample{"sample_" + lower(k) + "_text", "Sample" + k + "Text"}, - translationSample{"sample" + titleize(k) + "Text", "Sample" + k + "Text"}, - translationSample{"sample " + lower(k), "Sample" + k}, - translationSample{"sample-" + lower(k), "Sample" + k}, - translationSample{"sample_" + lower(k), "Sample" + k}, - translationSample{"sample" + titleize(k), "Sample" + k}, - translationSample{"sample " + titleize(k) + " text", "Sample" + k + "Text"}, - translationSample{"sample-" + titleize(k) + "-text", "Sample" + k + "Text"}, - translationSample{"sample_" + titleize(k) + "_text", "Sample" + k + "Text"}, - ) - } - - for _, sample := range samples { - assert.Equal(t, sample.out, ToGoName(sample.str)) - } -} - -func TestContainsStringsCI(t *testing.T) { - list := []string{"hello", "world", "and", "such"} - - assert.True(t, ContainsStringsCI(list, "hELLo")) - assert.True(t, ContainsStringsCI(list, "world")) - assert.True(t, ContainsStringsCI(list, "AND")) - assert.False(t, ContainsStringsCI(list, "nuts")) -} - -func TestSplitByFormat(t *testing.T) { - expected := []string{"one", "two", "three"} - for _, fmt := range []string{"csv", "pipes", "tsv", "ssv", "multi"} { - - var actual []string - switch fmt { - case "multi": - assert.Nil(t, SplitByFormat("", fmt)) - assert.Nil(t, SplitByFormat("blah", fmt)) - case "ssv": - actual = SplitByFormat(strings.Join(expected, " "), fmt) - assert.EqualValues(t, expected, actual) - case "pipes": - actual = SplitByFormat(strings.Join(expected, "|"), fmt) - assert.EqualValues(t, expected, actual) - case "tsv": - actual = SplitByFormat(strings.Join(expected, "\t"), fmt) - assert.EqualValues(t, expected, actual) - default: - actual = SplitByFormat(strings.Join(expected, ","), fmt) - assert.EqualValues(t, expected, actual) - } - } -} - -func TestJoinByFormat(t *testing.T) { - for _, fmt := range []string{"csv", "pipes", "tsv", "ssv", "multi"} { - - lval := []string{"one", "two", "three"} - var expected []string - switch fmt { - case "multi": - expected = lval - case "ssv": - expected = []string{strings.Join(lval, " ")} - case "pipes": - expected = []string{strings.Join(lval, "|")} - case "tsv": - expected = []string{strings.Join(lval, "\t")} - default: - expected = []string{strings.Join(lval, ",")} - } - assert.Nil(t, JoinByFormat(nil, fmt)) - assert.EqualValues(t, expected, JoinByFormat(lval, fmt)) - } -} - -func TestToFileName(t *testing.T) { - samples := []translationSample{ - {"SampleText", "sample_text"}, - {"FindThingByID", "find_thing_by_id"}, - {"CAPWD.folwdBYlc", "capwd_folwd_bylc"}, - {"CAPWDfolwdBYlc", "capwdfolwd_bylc"}, - {"CAP_WD_folwdBYlc", "cap_wd_folwd_bylc"}, - 
{"TypeOAI_alias", "type_oai_alias"}, - {"Type_OAI_alias", "type_oai_alias"}, - {"Type_OAIAlias", "type_oai_alias"}, - {"ELB.HTTPLoadBalancer", "elb_http_load_balancer"}, - {"elbHTTPLoadBalancer", "elb_http_load_balancer"}, - {"ELBHTTPLoadBalancer", "elb_http_load_balancer"}, - } - for k := range commonInitialisms { - samples = append(samples, - translationSample{"Sample" + k + "Text", "sample_" + lower(k) + "_text"}, - ) - } - - for _, sample := range samples { - assert.Equal(t, sample.out, ToFileName(sample.str)) - } -} - -func TestToCommandName(t *testing.T) { - samples := []translationSample{ - {"SampleText", "sample-text"}, - {"FindThingByID", "find-thing-by-id"}, - } - - for k := range commonInitialisms { - samples = append(samples, - translationSample{"Sample" + k + "Text", "sample-" + lower(k) + "-text"}, - ) - } - - for _, sample := range samples { - assert.Equal(t, sample.out, ToCommandName(sample.str)) - } -} - -func TestToHumanName(t *testing.T) { - samples := []translationSample{ - {"SampleText", "sample text"}, - {"FindThingByID", "find thing by ID"}, - } - - for k := range commonInitialisms { - samples = append(samples, - translationSample{"Sample" + k + "Text", "sample " + k + " text"}, - ) - } - - for _, sample := range samples { - assert.Equal(t, sample.out, ToHumanNameLower(sample.str)) - } -} - -func TestToJSONName(t *testing.T) { - samples := []translationSample{ - {"SampleText", "sampleText"}, - {"FindThingByID", "findThingById"}, - } - - for k := range commonInitialisms { - samples = append(samples, - translationSample{"Sample" + k + "Text", "sample" + titleize(k) + "Text"}, - ) - } - - for _, sample := range samples { - assert.Equal(t, sample.out, ToJSONName(sample.str)) - } -} - -type SimpleZeroes struct { - ID string - Name string -} -type ZeroesWithTime struct { - Time time.Time -} - -func TestIsZero(t *testing.T) { - var strs [5]string - var strss []string - var a int - var b int8 - var c int16 - var d int32 - var e int64 - var f uint - var g uint8 - var h uint16 - var i uint32 - var j uint64 - var k map[string]string - var l interface{} - var m *SimpleZeroes - var n string - var o SimpleZeroes - var p ZeroesWithTime - var q time.Time - data := []struct { - Data interface{} - Expected bool - }{ - {a, true}, - {b, true}, - {c, true}, - {d, true}, - {e, true}, - {f, true}, - {g, true}, - {h, true}, - {i, true}, - {j, true}, - {k, true}, - {l, true}, - {m, true}, - {n, true}, - {o, true}, - {p, true}, - {q, true}, - {strss, true}, - {strs, true}, - {"", true}, - {nil, true}, - {1, false}, - {0, true}, - {int8(1), false}, - {int8(0), true}, - {int16(1), false}, - {int16(0), true}, - {int32(1), false}, - {int32(0), true}, - {int64(1), false}, - {int64(0), true}, - {uint(1), false}, - {uint(0), true}, - {uint8(1), false}, - {uint8(0), true}, - {uint16(1), false}, - {uint16(0), true}, - {uint32(1), false}, - {uint32(0), true}, - {uint64(1), false}, - {uint64(0), true}, - {0.0, true}, - {0.1, false}, - {float32(0.0), true}, - {float32(0.1), false}, - {float64(0.0), true}, - {float64(0.1), false}, - {[...]string{}, true}, - {[...]string{"hello"}, false}, - {[]string(nil), true}, - {[]string{"a"}, false}, - } - - for _, it := range data { - assert.Equal(t, it.Expected, IsZero(it.Data), fmt.Sprintf("%#v", it.Data)) - } -} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index 26502f21d5..f458c81a84 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ 
b/vendor/github.com/go-openapi/swag/yaml.go @@ -42,6 +42,7 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { return json.RawMessage(b), err } +// BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { var canary map[interface{}]interface{} // validate this is an object and not a different type if err := yaml.Unmarshal(data, &canary); err != nil { @@ -55,14 +56,17 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return document, nil } +// JSONMapSlice represent a JSON object, with the order of keys maintained type JSONMapSlice []JSONMapItem +// MarshalJSON renders a JSONMapSlice as JSON func (s JSONMapSlice) MarshalJSON() ([]byte, error) { w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} s.MarshalEasyJSON(w) return w.BuildBytes() } +// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { w.RawByte('{') @@ -78,11 +82,14 @@ func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { w.RawByte('}') } +// UnmarshalJSON makes a JSONMapSlice from JSON func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { l := jlexer.Lexer{Data: data} s.UnmarshalEasyJSON(&l) return l.Error() } + +// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { if in.IsNull() { in.Skip() @@ -99,23 +106,34 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string Value interface{} } +// MarshalJSON renders a JSONMapItem as JSON func (s JSONMapItem) MarshalJSON() ([]byte, error) { w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} s.MarshalEasyJSON(w) return w.BuildBytes() } +// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { w.String(s.Key) w.RawByte(':') w.Raw(WriteJSON(s.Value)) } +// UnmarshalJSON makes a JSONMapItem from JSON +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { key := in.UnsafeString() in.WantColon() @@ -124,11 +142,6 @@ func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { s.Key = key s.Value = value } -func (s *JSONMapItem) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} func transformData(input interface{}) (out interface{}, err error) { switch in := input.(type) { @@ -146,9 +159,9 @@ func transformData(input interface{}) (out interface{}, err error) { return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key) } - v, err := transformData(mi.Value) - if err != nil { - return nil, err + v, ert := transformData(mi.Value) + if ert != nil { + return nil, ert } nmi.Value = v o[i] = nmi @@ -167,9 +180,9 @@ func transformData(input interface{}) (out interface{}, err error) { return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke) } - v, err := transformData(va) - if err != nil { - return nil, err + v, ert := transformData(va) + if ert != nil { + return nil, ert } nmi.Value = v o = append(o, nmi) @@ -201,7 +214,7 @@ func YAMLDoc(path string) (json.RawMessage, error) { return 
nil, err } - return json.RawMessage(data), nil + return data, nil } // YAMLData loads a yaml document from either http or a file diff --git a/vendor/github.com/go-openapi/swag/yaml_test.go b/vendor/github.com/go-openapi/swag/yaml_test.go deleted file mode 100644 index ee32fab7d9..0000000000 --- a/vendor/github.com/go-openapi/swag/yaml_test.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "encoding/json" - "errors" - "net/http" - "net/http/httptest" - "testing" - - yaml "gopkg.in/yaml.v2" - - "github.com/stretchr/testify/assert" -) - -type failJSONMarhal struct { -} - -func (f failJSONMarhal) MarshalJSON() ([]byte, error) { - return nil, errors.New("expected") -} - -func TestLoadHTTPBytes(t *testing.T) { - _, err := LoadFromFileOrHTTP("httx://12394:abd") - assert.Error(t, err) - - serv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - })) - defer serv.Close() - - _, err = LoadFromFileOrHTTP(serv.URL) - assert.Error(t, err) - - ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte("the content")) - })) - defer ts2.Close() - - d, err := LoadFromFileOrHTTP(ts2.URL) - assert.NoError(t, err) - assert.Equal(t, []byte("the content"), d) -} - -func TestYAMLToJSON(t *testing.T) { - - sd := `--- -1: the int key value -name: a string value -'y': some value -` - var data yaml.MapSlice - yaml.Unmarshal([]byte(sd), &data) - - d, err := YAMLToJSON(data) - if assert.NoError(t, err) { - assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value"}`, string(d)) - } - - data = append(data, yaml.MapItem{Key: true, Value: "the bool value"}) - d, err = YAMLToJSON(data) - assert.Error(t, err) - assert.Nil(t, d) - - data = data[:len(data)-1] - - tag := yaml.MapSlice{{Key: "name", Value: "tag name"}} - data = append(data, yaml.MapItem{Key: "tag", Value: tag}) - - d, err = YAMLToJSON(data) - assert.NoError(t, err) - assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value","tag":{"name":"tag name"}}`, string(d)) - - tag = yaml.MapSlice{{Key: true, Value: "bool tag name"}} - data = append(data[:len(data)-1], yaml.MapItem{Key: "tag", Value: tag}) - - d, err = YAMLToJSON(data) - assert.Error(t, err) - assert.Nil(t, d) - - var lst []interface{} - lst = append(lst, "hello") - - d, err = YAMLToJSON(lst) - assert.NoError(t, err) - assert.Equal(t, []byte(`["hello"]`), []byte(d)) - - lst = append(lst, data) - - d, err = YAMLToJSON(lst) - assert.Error(t, err) - assert.Nil(t, d) - - // _, err := yamlToJSON(failJSONMarhal{}) - // assert.Error(t, err) - - _, err = BytesToYAMLDoc([]byte("- name: hello\n")) - assert.Error(t, err) - - dd, err := BytesToYAMLDoc([]byte("description: 'object created'\n")) - assert.NoError(t, err) - - d, err = YAMLToJSON(dd) - assert.NoError(t, err) - 
assert.Equal(t, json.RawMessage(`{"description":"object created"}`), d) -} - -func TestLoadStrategy(t *testing.T) { - - loader := func(p string) ([]byte, error) { - return []byte(yamlPetStore), nil - } - remLoader := func(p string) ([]byte, error) { - return []byte("not it"), nil - } - - ld := LoadStrategy("blah", loader, remLoader) - b, _ := ld("") - assert.Equal(t, []byte(yamlPetStore), b) - - serv := httptest.NewServer(http.HandlerFunc(yamlPestoreServer)) - defer serv.Close() - - s, err := YAMLDoc(serv.URL) - assert.NoError(t, err) - assert.NotNil(t, s) - - ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - rw.Write([]byte("\n")) - })) - defer ts2.Close() - _, err = YAMLDoc(ts2.URL) - assert.Error(t, err) -} - -var yamlPestoreServer = func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(yamlPetStore)) -} - -func TestWithYKey(t *testing.T) { - doc, err := BytesToYAMLDoc([]byte(withYKey)) - if assert.NoError(t, err) { - _, err := YAMLToJSON(doc) - if assert.Error(t, err) { - doc, err := BytesToYAMLDoc([]byte(withQuotedYKey)) - if assert.NoError(t, err) { - jsond, err := YAMLToJSON(doc) - if assert.NoError(t, err) { - var yt struct { - Definitions struct { - Viewbox struct { - Properties struct { - Y struct { - Type string `json:"type"` - } `json:"y"` - } `json:"properties"` - } `json:"viewbox"` - } `json:"definitions"` - } - if assert.NoError(t, json.Unmarshal(jsond, &yt)) { - assert.Equal(t, "integer", yt.Definitions.Viewbox.Properties.Y.Type) - } - } - } - } - - } -} - -const withQuotedYKey = `consumes: -- application/json -definitions: - viewBox: - type: object - properties: - x: - type: integer - format: int16 - # y -> types don't match: expect map key string or int get: bool - "y": - type: integer - format: int16 - width: - type: integer - format: int16 - height: - type: integer - format: int16 -info: - description: Test RESTful APIs - title: Test Server - version: 1.0.0 -basePath: /api -paths: - /test: - get: - operationId: findAll - parameters: - - name: since - in: query - type: integer - format: int64 - - name: limit - in: query - type: integer - format: int32 - default: 20 - responses: - 200: - description: Array[Trigger] - schema: - type: array - items: - $ref: "#/definitions/viewBox" -produces: -- application/json -schemes: -- https -swagger: "2.0" -` - -const withYKey = `consumes: -- application/json -definitions: - viewBox: - type: object - properties: - x: - type: integer - format: int16 - # y -> types don't match: expect map key string or int get: bool - y: - type: integer - format: int16 - width: - type: integer - format: int16 - height: - type: integer - format: int16 -info: - description: Test RESTful APIs - title: Test Server - version: 1.0.0 -basePath: /api -paths: - /test: - get: - operationId: findAll - parameters: - - name: since - in: query - type: integer - format: int64 - - name: limit - in: query - type: integer - format: int32 - default: 20 - responses: - 200: - description: Array[Trigger] - schema: - type: array - items: - $ref: "#/definitions/viewBox" -produces: -- application/json -schemes: -- https -swagger: "2.0" -` - -const yamlPetStore = `swagger: '2.0' -info: - version: '1.0.0' - title: Swagger Petstore - description: A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification - termsOfService: http://helloreverb.com/terms/ - contact: - name: Swagger API team - email: foo@example.com - 
url: http://swagger.io - license: - name: MIT - url: http://opensource.org/licenses/MIT -host: petstore.swagger.wordnik.com -basePath: /api -schemes: - - http -consumes: - - application/json -produces: - - application/json -paths: - /pets: - get: - description: Returns all pets from the system that the user has access to - operationId: findPets - produces: - - application/json - - application/xml - - text/xml - - text/html - parameters: - - name: tags - in: query - description: tags to filter by - required: false - type: array - items: - type: string - collectionFormat: csv - - name: limit - in: query - description: maximum number of results to return - required: false - type: integer - format: int32 - responses: - '200': - description: pet response - schema: - type: array - items: - $ref: '#/definitions/pet' - default: - description: unexpected error - schema: - $ref: '#/definitions/errorModel' - post: - description: Creates a new pet in the store. Duplicates are allowed - operationId: addPet - produces: - - application/json - parameters: - - name: pet - in: body - description: Pet to add to the store - required: true - schema: - $ref: '#/definitions/newPet' - responses: - '200': - description: pet response - schema: - $ref: '#/definitions/pet' - default: - description: unexpected error - schema: - $ref: '#/definitions/errorModel' - /pets/{id}: - get: - description: Returns a user based on a single ID, if the user does not have access to the pet - operationId: findPetById - produces: - - application/json - - application/xml - - text/xml - - text/html - parameters: - - name: id - in: path - description: ID of pet to fetch - required: true - type: integer - format: int64 - responses: - '200': - description: pet response - schema: - $ref: '#/definitions/pet' - default: - description: unexpected error - schema: - $ref: '#/definitions/errorModel' - delete: - description: deletes a single pet based on the ID supplied - operationId: deletePet - parameters: - - name: id - in: path - description: ID of pet to delete - required: true - type: integer - format: int64 - responses: - '204': - description: pet deleted - default: - description: unexpected error - schema: - $ref: '#/definitions/errorModel' -definitions: - pet: - required: - - id - - name - properties: - id: - type: integer - format: int64 - name: - type: string - tag: - type: string - newPet: - allOf: - - $ref: '#/definitions/pet' - - required: - - name - properties: - id: - type: integer - format: int64 - name: - type: string - errorModel: - required: - - code - - message - properties: - code: - type: integer - format: int32 - message: - type: string -` diff --git a/vendor/github.com/gogo/protobuf/.gitignore b/vendor/github.com/gogo/protobuf/.gitignore deleted file mode 100644 index 76009479d0..0000000000 --- a/vendor/github.com/gogo/protobuf/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -._* -*.js -*.js.map diff --git a/vendor/github.com/gogo/protobuf/.mailmap b/vendor/github.com/gogo/protobuf/.mailmap deleted file mode 100644 index bc00102193..0000000000 --- a/vendor/github.com/gogo/protobuf/.mailmap +++ /dev/null @@ -1,8 +0,0 @@ -Walter Schulze Walter Schulze -Walter Schulze -Walter Schulze awalterschulze -Walter Schulze awalterschulze@gmail.com -John Tuley -Anton Povarov -Denis Smirnov dennwc -DongYun Kang \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/.travis.yml b/vendor/github.com/gogo/protobuf/.travis.yml deleted file mode 100644 index 
7f69fba09e..0000000000 --- a/vendor/github.com/gogo/protobuf/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -env: - - PROTOBUF_VERSION=2.6.1 - - PROTOBUF_VERSION=3.0.2 - - PROTOBUF_VERSION=3.4.0 - -before_install: - - ./install-protobuf.sh - - PATH=/home/travis/bin:$PATH protoc --version - -script: - - PATH=/home/travis/bin:$PATH make buildserverall - - echo $TRAVIS_GO_VERSION - - if [ "$TRAVIS_GO_VERSION" == 1.9 ] && [[ "$PROTOBUF_VERSION" == 3.4.0 ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi - -language: go - -go: - - 1.8.3 - - 1.9 - diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS index b1abc4d305..1b4f6c208a 100644 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -11,6 +11,7 @@ John Shahid John Tuley Laurent Patrick Lee +Peter Edge Roger Johansson Sam Nguyen Sergio Arbeo diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS deleted file mode 100644 index b368efb7f2..0000000000 --- a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS +++ /dev/null @@ -1,5 +0,0 @@ -The contributors to the Go protobuf repository: - -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE index 7be0cc7b62..f57de90da8 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -1,7 +1,6 @@ -Protocol Buffers for Go with Gadgets - Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf + +Protocol Buffers for Go with Gadgets Go support for Protocol Buffers - Google's data interchange format diff --git a/vendor/github.com/gogo/protobuf/Makefile b/vendor/github.com/gogo/protobuf/Makefile deleted file mode 100644 index 17e329367f..0000000000 --- a/vendor/github.com/gogo/protobuf/Makefile +++ /dev/null @@ -1,161 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -GO_VERSION:=$(shell go version) - -.PHONY: nuke regenerate tests clean install gofmt vet contributors - -all: clean install regenerate install tests errcheck vet - -buildserverall: clean install regenerate install tests vet js - -install: - go install ./proto - go install ./gogoproto - go install ./jsonpb - go install ./protoc-gen-gogo - go install ./protoc-gen-gofast - go install ./protoc-gen-gogofast - go install ./protoc-gen-gogofaster - go install ./protoc-gen-gogoslick - go install ./protoc-gen-gostring - go install ./protoc-min-version - go install ./protoc-gen-combo - go install ./gogoreplace - -clean: - go clean ./... - -nuke: - go clean -i ./... - -gofmt: - gofmt -l -s -w . - -regenerate: - make -C protoc-gen-gogo/descriptor regenerate - make -C protoc-gen-gogo/plugin regenerate - make -C protoc-gen-gogo/testdata regenerate - make -C gogoproto regenerate - make -C proto/testdata regenerate - make -C jsonpb/jsonpb_test_proto regenerate - make -C _conformance regenerate - make -C types regenerate - make -C test regenerate - make -C test/example regenerate - make -C test/unrecognized regenerate - make -C test/group regenerate - make -C test/unrecognizedgroup regenerate - make -C test/enumstringer regenerate - make -C test/unmarshalmerge regenerate - make -C test/moredefaults regenerate - make -C test/issue8 regenerate - make -C test/enumprefix regenerate - make -C test/enumcustomname regenerate - make -C test/packed regenerate - make -C test/protosize regenerate - make -C test/tags regenerate - make -C test/oneof regenerate - make -C test/oneof3 regenerate - make -C test/theproto3 regenerate - make -C test/mapdefaults regenerate - make -C test/mapsproto2 regenerate - make -C test/issue42order regenerate - make -C proto generate-test-pbs - make -C test/importdedup regenerate - make -C test/custombytesnonstruct regenerate - make -C test/required regenerate - make -C test/casttype regenerate - make -C test/castvalue regenerate - make -C vanity/test regenerate - make -C test/sizeunderscore regenerate - make -C test/issue34 regenerate - make -C test/empty-issue70 regenerate - make -C test/indeximport-issue72 regenerate - make -C test/fuzztests regenerate - make -C test/oneofembed regenerate - make -C test/asymetric-issue125 regenerate - make -C test/filedotname regenerate - make -C test/nopackage regenerate - make -C test/types regenerate - make -C test/proto3extension regenerate - make -C test/stdtypes regenerate - make -C test/data regenerate - make -C test/typedecl regenerate - make -C test/issue260 regenerate - make -C test/issue261 regenerate - make -C test/issue262 regenerate - make -C test/issue312 regenerate - make -C test/enumdecl regenerate - make -C test/typedecl_all regenerate - make -C test/enumdecl_all regenerate - make -C test/int64support regenerate - make -C test/issue322 regenerate - make -C test/issue330 regenerate - make gofmt - -tests: - go build ./test/enumprefix - go test ./... - (cd test/stdtypes && make test) - -vet: - go vet ./... 
- go tool vet --shadow . - -errcheck: - go get github.com/kisielk/errcheck - errcheck ./test/... - -drone: - sudo apt-get install protobuf-compiler - (cd $(GOPATH)/src/github.com/gogo/protobuf && make buildserverall) - -testall: - go get -u github.com/golang/protobuf/proto - make -C protoc-gen-gogo/testdata test - make -C vanity/test test - make -C test/registration test - make tests - -bench: - go get golang.org/x/tools/cmd/benchcmp - (cd test/mixbench && go build .) - ./test/mixbench/mixbench - -contributors: - git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS - -js: -ifeq (go1.9, $(findstring go1.9, $(GO_VERSION))) - go get -u github.com/gopherjs/gopherjs - gopherjs build github.com/gogo/protobuf/protoc-gen-gogo -endif - -update: - (cd protobuf && make update) diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README deleted file mode 100644 index c820827238..0000000000 --- a/vendor/github.com/gogo/protobuf/README +++ /dev/null @@ -1,257 +0,0 @@ -GoGoProtobuf http://github.com/gogo/protobuf extends -GoProtobuf http://github.com/golang/protobuf - -# Go support for Protocol Buffers - -Google's data interchange format. -Copyright 2010 The Go Authors. -https://github.com/golang/protobuf - -This package and the code it generates requires at least Go 1.4. - -This software implements Go bindings for protocol buffers. For -information about protocol buffers themselves, see - https://developers.google.com/protocol-buffers/ - -## Installation ## - -To use this software, you must: -- Install the standard C++ implementation of protocol buffers from - https://developers.google.com/protocol-buffers/ -- Of course, install the Go compiler and tools from - https://golang.org/ - See - https://golang.org/doc/install - for details or, if you are using gccgo, follow the instructions at - https://golang.org/doc/install/gccgo -- Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. - The compiler plugin, protoc-gen-go, will be installed in $GOBIN, - defaulting to $GOPATH/bin. It must be in your $PATH for the protocol - compiler, protoc, to find it. - -This software has two parts: a 'protocol compiler plugin' that -generates Go source files that, once compiled, can access and manage -protocol buffers; and a library that implements run-time support for -encoding (marshaling), decoding (unmarshaling), and accessing protocol -buffers. - -There is support for gRPC in Go using protocol buffers. -See the note at the bottom of this file for details. - -There are no insertion points in the plugin. - -GoGoProtobuf provides extensions for protocol buffers and GoProtobuf -see http://github.com/gogo/protobuf/gogoproto/doc.go - -## Using protocol buffers with Go ## - -Once the software is installed, there are two steps to using it. -First you must compile the protocol buffer definitions and then import -them, with the support library, into your program. - -To compile the protocol buffer definition, run protoc with the --gogo_out -parameter set to the directory you want to output the Go code to. - - protoc --gogo_out=. *.proto - -The generated files will be suffixed .pb.go. See the Test code below -for an example using such a file. - -The package comment for the proto library contains text describing -the interface provided in Go for protocol buffers. Here is an edited -version. 
- -If you are using any gogo.proto extensions you will need to specify the -proto_path to include the descriptor.proto and gogo.proto. -gogo.proto is located in github.com/gogo/protobuf/gogoproto -This should be fine, since your import is the same. -descriptor.proto is located in either github.com/gogo/protobuf/protobuf -or code.google.com/p/protobuf/trunk/src/ -Its import is google/protobuf/descriptor.proto so it might need some help. - - protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto - -========== - -The proto package converts data structures to and from the -wire format of protocol buffers. It works in concert with the -Go source code generated for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - Helpers for getting values are superseded by the - GetFoo methods and their use is deprecated. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed with the enum's type name. Enum types have - a String method, and a Enum method to assist in message construction. - - Nested groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. 
- -Consider file test.proto, containing - -```proto - package example; - - enum FOO { X = 17; }; - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } -``` - -To create and play with a Test object from the example package, - -```go - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - "path/to/example" - ) - - func main() { - test := &example.Test { - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &example.Test_OptionalGroup { - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &example.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -``` - - -## Parameters ## - -To pass extra parameters to the plugin, use a comma-separated -parameter list separated from the output directory by a colon: - - - protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto - - -- `import_prefix=xxx` - a prefix that is added onto the beginning of - all imports. Useful for things like generating protos in a - subdirectory, or regenerating vendored protobufs in-place. -- `import_path=foo/bar` - used as the package if no input files - declare `go_package`. If it contains slashes, everything up to the - rightmost slash is ignored. -- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to - load. The only plugin in this repo is `grpc`. -- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is - associated with Go package quux/shme. This is subject to the - import_prefix parameter. - -## gRPC Support ## - -If a proto file specifies RPC services, protoc-gen-go can be instructed to -generate code compatible with gRPC (http://www.grpc.io/). To do this, pass -the `plugins` parameter to protoc-gen-go; the usual way is to insert it into -the --go_out argument to protoc: - - protoc --gogo_out=plugins=grpc:. *.proto - -## Compatibility ## - -The library and the generated code are expected to be stable over time. -However, we reserve the right to make breaking changes without notice for the -following reasons: - -- Security. A security issue in the specification or implementation may come to - light whose resolution requires breaking compatibility. We reserve the right - to address such security issues. -- Unspecified behavior. There are some aspects of the Protocol Buffers - specification that are undefined. Programs that depend on such unspecified - behavior may break in future releases. -- Specification errors or changes. If it becomes necessary to address an - inconsistency, incompleteness, or change in the Protocol Buffers - specification, resolving the issue could affect the meaning or legality of - existing programs. We reserve the right to address such issues, including - updating the implementations. -- Bugs. If the library has a bug that violates the specification, a program - that depends on the buggy behavior may break if the bug is fixed. We reserve - the right to fix such bugs. -- Adding methods or fields to generated structs. These may conflict with field - names that already exist in a schema, causing applications to break. 
When the - code generator encounters a field in the schema that would collide with a - generated field or method name, the code generator will append an underscore - to the generated field or method name. -- Adding, removing, or changing methods or fields in generated structs that - start with `XXX`. These parts of the generated code are exported out of - necessity, but should not be considered part of the public API. -- Adding, removing, or changing unexported symbols in generated code. - -Any breaking changes outside of these will be announced 6 months in advance to -protobuf@googlegroups.com. - -You should, whenever possible, use generated code created by the `protoc-gen-go` -tool built at the same commit as the `proto` package. The `proto` package -declares package-level constants in the form `ProtoPackageIsVersionX`. -Application code and generated code may depend on one of these constants to -ensure that compilation will fail if the available version of the proto library -is too old. Whenever we make a change to the generated code that requires newer -library support, in the same commit we will increment the version number of the -generated code and declare a new package-level constant whose name incorporates -the latest version number. Removing a compatibility constant is considered a -breaking change and would be subject to the announcement policy stated above. - -## Plugins ## - -The `protoc-gen-go/generator` package exposes a plugin interface, -which is used by the gRPC code generation. This interface is not -supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md deleted file mode 100644 index b8eb4d14e4..0000000000 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ /dev/null @@ -1,122 +0,0 @@ -# Protocol Buffers for Go with Gadgets - -[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) - -gogoprotobuf is a fork of golang/protobuf with extra code generation features. - -This code generation is used to achieve: - - - fast marshalling and unmarshalling - - more canonical Go structures - - goprotobuf compatibility - - less typing by optionally generating extra helper code - - peace of mind by optionally generating test and benchmark code - - other serialization formats - -Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this -issue - -## Users - -These projects use gogoprotobuf: - - - etcd - blog - sample proto file - - spacemonkey - blog - - badoo - sample proto file - - mesos-go - sample proto file - - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com - - cockroachdb - sample proto file - - go-ipfs - sample proto file - - rkive-go - sample proto file - - dropbox - - srclib - sample proto file - - adyoulike - - cloudfoundry - sample proto file - - kubernetes - go2idl built on top of gogoprotobuf - - dgraph - release notes - benchmarks - - centrifugo - release notes - blog - - docker swarmkit - sample proto file - - nats.io - go-nats-streaming - - tidb - Communication between tidb and tikv - - protoactor-go - vanity command that also generates actors from service definitions - - containerd - vanity command with custom field names that conforms to the golang convention. - - nakama - - proteus - - carbonzipper stack - - SendGrid - -Please let us know if you are using gogoprotobuf by posting on our GoogleGroup. 
- -### Mentioned - - - Cloudflare - go serialization talk - Albert Strasheim - - GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson - - alecthomas' go serialization benchmarks - -## Getting Started - -There are several ways to use gogoprotobuf, but for all you need to install go and protoc. -After that you can choose: - - - Speed - - More Speed and more generated code - - Most Speed and most customization - -### Installation - -To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.8.3 and 1.9 are continuously tested. - -Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). -Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.4.0 are continuously tested. - -### Speed - -Install the protoc-gen-gofast binary - - go get github.com/gogo/protobuf/protoc-gen-gofast - -Use it to generate faster marshaling and unmarshaling go code for your protocol buffers. - - protoc --gofast_out=. myproto.proto - -This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). - -### More Speed and more generated code - -Fields without pointers cause less time in the garbage collector. -More code generation results in more convenient methods. - -Other binaries are also included: - - protoc-gen-gogofast (same as gofast, but imports gogoprotobuf) - protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields) - protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods) - -Installing any of these binaries is easy. Simply run: - - go get github.com/gogo/protobuf/proto - go get github.com/gogo/protobuf/{binary} - go get github.com/gogo/protobuf/gogoproto - -These binaries allow you to using gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). - -### Most Speed and most customization - -Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize. -gogoprotobuf also offers more serialization formats and generation of tests and even more methods. - -Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation. - -Install protoc-gen-gogo: - - go get github.com/gogo/protobuf/proto - go get github.com/gogo/protobuf/jsonpb - go get github.com/gogo/protobuf/protoc-gen-gogo - go get github.com/gogo/protobuf/gogoproto - -## GRPC - -It works the same as golang/protobuf, simply specify the plugin. -Here is an example using gofast: - - protoc --gofast_out=plugins=grpc:. 
my.proto diff --git a/vendor/github.com/gogo/protobuf/bench.md b/vendor/github.com/gogo/protobuf/bench.md deleted file mode 100644 index 16da66ad21..0000000000 --- a/vendor/github.com/gogo/protobuf/bench.md +++ /dev/null @@ -1,190 +0,0 @@ -# Benchmarks - -## How to reproduce - -For a comparison run: - - make bench - -followed by [benchcmp](http://code.google.com/p/go/source/browse/misc/benchcmp benchcmp) on the resulting files: - - $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshaler.txt - $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshaler.txt - -Benchmarks ran on Revision: 11c56be39364 - -June 2013 - -Processor 2,66 GHz Intel Core i7 - -Memory 8 GB 1067 MHz DDR3 - -## Marshaler - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| benchmark | old ns/op | new ns/op | delta |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoMarshal | 2656 | 889 | -66.53% |
| BenchmarkNinOptNativeProtoMarshal | 2651 | 1015 | -61.71% |
| BenchmarkNidRepNativeProtoMarshal | 42661 | 12519 | -70.65% |
| BenchmarkNinRepNativeProtoMarshal | 42306 | 12354 | -70.80% |
| BenchmarkNidRepPackedNativeProtoMarshal | 34148 | 11902 | -65.15% |
| BenchmarkNinRepPackedNativeProtoMarshal | 33375 | 11969 | -64.14% |
| BenchmarkNidOptStructProtoMarshal | 7148 | 3727 | -47.86% |
| BenchmarkNinOptStructProtoMarshal | 6956 | 3481 | -49.96% |
| BenchmarkNidRepStructProtoMarshal | 46551 | 19492 | -58.13% |
| BenchmarkNinRepStructProtoMarshal | 46715 | 19043 | -59.24% |
| BenchmarkNidEmbeddedStructProtoMarshal | 5231 | 2050 | -60.81% |
| BenchmarkNinEmbeddedStructProtoMarshal | 4665 | 2000 | -57.13% |
| BenchmarkNidNestedStructProtoMarshal | 181106 | 103604 | -42.79% |
| BenchmarkNinNestedStructProtoMarshal | 182053 | 102069 | -43.93% |
| BenchmarkNidOptCustomProtoMarshal | 1209 | 310 | -74.36% |
| BenchmarkNinOptCustomProtoMarshal | 1435 | 277 | -80.70% |
| BenchmarkNidRepCustomProtoMarshal | 4126 | 763 | -81.51% |
| BenchmarkNinRepCustomProtoMarshal | 3972 | 769 | -80.64% |
| BenchmarkNinOptNativeUnionProtoMarshal | 973 | 303 | -68.86% |
| BenchmarkNinOptStructUnionProtoMarshal | 1536 | 521 | -66.08% |
| BenchmarkNinEmbeddedStructUnionProtoMarshal | 2327 | 884 | -62.01% |
| BenchmarkNinNestedStructUnionProtoMarshal | 2070 | 743 | -64.11% |
| BenchmarkTreeProtoMarshal | 1554 | 838 | -46.07% |
| BenchmarkOrBranchProtoMarshal | 3156 | 2012 | -36.25% |
| BenchmarkAndBranchProtoMarshal | 3183 | 1996 | -37.29% |
| BenchmarkLeafProtoMarshal | 965 | 606 | -37.20% |
| BenchmarkDeepTreeProtoMarshal | 2316 | 1283 | -44.60% |
| BenchmarkADeepBranchProtoMarshal | 2719 | 1492 | -45.13% |
| BenchmarkAndDeepBranchProtoMarshal | 4663 | 2922 | -37.34% |
| BenchmarkDeepLeafProtoMarshal | 1849 | 1016 | -45.05% |
| BenchmarkNilProtoMarshal | 439 | 76 | -82.53% |
| BenchmarkNidOptEnumProtoMarshal | 514 | 152 | -70.43% |
| BenchmarkNinOptEnumProtoMarshal | 550 | 158 | -71.27% |
| BenchmarkNidRepEnumProtoMarshal | 647 | 207 | -68.01% |
| BenchmarkNinRepEnumProtoMarshal | 662 | 213 | -67.82% |
| BenchmarkTimerProtoMarshal | 934 | 271 | -70.99% |
| BenchmarkMyExtendableProtoMarshal | 608 | 185 | -69.57% |
| BenchmarkOtherExtenableProtoMarshal | 1112 | 332 | -70.14% |
| benchmark | old MB/s | new MB/s | speedup |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoMarshal | 126.86 | 378.86 | 2.99x |
| BenchmarkNinOptNativeProtoMarshal | 114.27 | 298.42 | 2.61x |
| BenchmarkNidRepNativeProtoMarshal | 164.25 | 561.20 | 3.42x |
| BenchmarkNinRepNativeProtoMarshal | 166.10 | 568.23 | 3.42x |
| BenchmarkNidRepPackedNativeProtoMarshal | 99.10 | 283.97 | 2.87x |
| BenchmarkNinRepPackedNativeProtoMarshal | 101.30 | 282.31 | 2.79x |
| BenchmarkNidOptStructProtoMarshal | 176.83 | 339.07 | 1.92x |
| BenchmarkNinOptStructProtoMarshal | 163.59 | 326.57 | 2.00x |
| BenchmarkNidRepStructProtoMarshal | 178.84 | 427.49 | 2.39x |
| BenchmarkNinRepStructProtoMarshal | 178.70 | 437.69 | 2.45x |
| BenchmarkNidEmbeddedStructProtoMarshal | 124.24 | 317.56 | 2.56x |
| BenchmarkNinEmbeddedStructProtoMarshal | 132.03 | 307.99 | 2.33x |
| BenchmarkNidNestedStructProtoMarshal | 192.91 | 337.86 | 1.75x |
| BenchmarkNinNestedStructProtoMarshal | 192.44 | 344.45 | 1.79x |
| BenchmarkNidOptCustomProtoMarshal | 29.77 | 116.03 | 3.90x |
| BenchmarkNinOptCustomProtoMarshal | 22.29 | 115.38 | 5.18x |
| BenchmarkNidRepCustomProtoMarshal | 35.14 | 189.80 | 5.40x |
| BenchmarkNinRepCustomProtoMarshal | 36.50 | 188.40 | 5.16x |
| BenchmarkNinOptNativeUnionProtoMarshal | 32.87 | 105.39 | 3.21x |
| BenchmarkNinOptStructUnionProtoMarshal | 66.40 | 195.76 | 2.95x |
| BenchmarkNinEmbeddedStructUnionProtoMarshal | 93.24 | 245.26 | 2.63x |
| BenchmarkNinNestedStructUnionProtoMarshal | 57.49 | 160.06 | 2.78x |
| BenchmarkTreeProtoMarshal | 137.64 | 255.12 | 1.85x |
| BenchmarkOrBranchProtoMarshal | 137.80 | 216.10 | 1.57x |
| BenchmarkAndBranchProtoMarshal | 136.64 | 217.89 | 1.59x |
| BenchmarkLeafProtoMarshal | 214.48 | 341.53 | 1.59x |
| BenchmarkDeepTreeProtoMarshal | 95.85 | 173.03 | 1.81x |
| BenchmarkADeepBranchProtoMarshal | 82.73 | 150.78 | 1.82x |
| BenchmarkAndDeepBranchProtoMarshal | 96.72 | 153.98 | 1.59x |
| BenchmarkDeepLeafProtoMarshal | 117.34 | 213.41 | 1.82x |
| BenchmarkNidOptEnumProtoMarshal | 3.89 | 13.16 | 3.38x |
| BenchmarkNinOptEnumProtoMarshal | 1.82 | 6.30 | 3.46x |
| BenchmarkNidRepEnumProtoMarshal | 12.36 | 38.50 | 3.11x |
| BenchmarkNinRepEnumProtoMarshal | 12.08 | 37.53 | 3.11x |
| BenchmarkTimerProtoMarshal | 73.81 | 253.87 | 3.44x |
| BenchmarkMyExtendableProtoMarshal | 13.15 | 43.08 | 3.28x |
| BenchmarkOtherExtenableProtoMarshal | 24.28 | 81.09 | 3.34x |
## Unmarshaler
| benchmark | old ns/op | new ns/op | delta |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoUnmarshal | 2521 | 1006 | -60.10% |
| BenchmarkNinOptNativeProtoUnmarshal | 2529 | 1750 | -30.80% |
| BenchmarkNidRepNativeProtoUnmarshal | 49067 | 35299 | -28.06% |
| BenchmarkNinRepNativeProtoUnmarshal | 47990 | 35456 | -26.12% |
| BenchmarkNidRepPackedNativeProtoUnmarshal | 26456 | 23950 | -9.47% |
| BenchmarkNinRepPackedNativeProtoUnmarshal | 26499 | 24037 | -9.29% |
| BenchmarkNidOptStructProtoUnmarshal | 6803 | 3873 | -43.07% |
| BenchmarkNinOptStructProtoUnmarshal | 6786 | 4154 | -38.79% |
| BenchmarkNidRepStructProtoUnmarshal | 56276 | 31970 | -43.19% |
| BenchmarkNinRepStructProtoUnmarshal | 48750 | 31832 | -34.70% |
| BenchmarkNidEmbeddedStructProtoUnmarshal | 4556 | 1973 | -56.69% |
| BenchmarkNinEmbeddedStructProtoUnmarshal | 4485 | 1975 | -55.96% |
| BenchmarkNidNestedStructProtoUnmarshal | 223395 | 135844 | -39.19% |
| BenchmarkNinNestedStructProtoUnmarshal | 226446 | 134022 | -40.82% |
| BenchmarkNidOptCustomProtoUnmarshal | 1859 | 300 | -83.86% |
| BenchmarkNinOptCustomProtoUnmarshal | 1486 | 402 | -72.95% |
| BenchmarkNidRepCustomProtoUnmarshal | 8229 | 1669 | -79.72% |
| BenchmarkNinRepCustomProtoUnmarshal | 8253 | 1649 | -80.02% |
| BenchmarkNinOptNativeUnionProtoUnmarshal | 840 | 307 | -63.45% |
| BenchmarkNinOptStructUnionProtoUnmarshal | 1395 | 639 | -54.19% |
| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 2297 | 1167 | -49.19% |
| BenchmarkNinNestedStructUnionProtoUnmarshal | 1820 | 889 | -51.15% |
| BenchmarkTreeProtoUnmarshal | 1521 | 720 | -52.66% |
| BenchmarkOrBranchProtoUnmarshal | 2669 | 1385 | -48.11% |
| BenchmarkAndBranchProtoUnmarshal | 2667 | 1420 | -46.76% |
| BenchmarkLeafProtoUnmarshal | 1171 | 584 | -50.13% |
| BenchmarkDeepTreeProtoUnmarshal | 2065 | 1081 | -47.65% |
| BenchmarkADeepBranchProtoUnmarshal | 2695 | 1178 | -56.29% |
| BenchmarkAndDeepBranchProtoUnmarshal | 4055 | 1918 | -52.70% |
| BenchmarkDeepLeafProtoUnmarshal | 1758 | 865 | -50.80% |
| BenchmarkNilProtoUnmarshal | 564 | 63 | -88.79% |
| BenchmarkNidOptEnumProtoUnmarshal | 762 | 73 | -90.34% |
| BenchmarkNinOptEnumProtoUnmarshal | 764 | 163 | -78.66% |
| BenchmarkNidRepEnumProtoUnmarshal | 1078 | 447 | -58.53% |
| BenchmarkNinRepEnumProtoUnmarshal | 1071 | 479 | -55.28% |
| BenchmarkTimerProtoUnmarshal | 1128 | 362 | -67.91% |
| BenchmarkMyExtendableProtoUnmarshal | 808 | 217 | -73.14% |
| BenchmarkOtherExtenableProtoUnmarshal | 1233 | 517 | -58.07% |
| benchmark | old MB/s | new MB/s | speedup |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoUnmarshal | 133.67 | 334.98 | 2.51x |
| BenchmarkNinOptNativeProtoUnmarshal | 119.77 | 173.08 | 1.45x |
| BenchmarkNidRepNativeProtoUnmarshal | 143.23 | 199.12 | 1.39x |
| BenchmarkNinRepNativeProtoUnmarshal | 146.07 | 198.16 | 1.36x |
| BenchmarkNidRepPackedNativeProtoUnmarshal | 127.80 | 141.04 | 1.10x |
| BenchmarkNinRepPackedNativeProtoUnmarshal | 127.55 | 140.78 | 1.10x |
| BenchmarkNidOptStructProtoUnmarshal | 185.79 | 326.31 | 1.76x |
| BenchmarkNinOptStructProtoUnmarshal | 167.68 | 273.66 | 1.63x |
| BenchmarkNidRepStructProtoUnmarshal | 147.88 | 260.39 | 1.76x |
| BenchmarkNinRepStructProtoUnmarshal | 171.20 | 261.97 | 1.53x |
| BenchmarkNidEmbeddedStructProtoUnmarshal | 142.86 | 329.42 | 2.31x |
| BenchmarkNinEmbeddedStructProtoUnmarshal | 137.33 | 311.83 | 2.27x |
| BenchmarkNidNestedStructProtoUnmarshal | 154.97 | 259.47 | 1.67x |
| BenchmarkNinNestedStructProtoUnmarshal | 154.32 | 258.42 | 1.67x |
| BenchmarkNidOptCustomProtoUnmarshal | 19.36 | 119.66 | 6.18x |
| BenchmarkNinOptCustomProtoUnmarshal | 21.52 | 79.50 | 3.69x |
| BenchmarkNidRepCustomProtoUnmarshal | 17.62 | 86.86 | 4.93x |
| BenchmarkNinRepCustomProtoUnmarshal | 17.57 | 87.92 | 5.00x |
| BenchmarkNinOptNativeUnionProtoUnmarshal | 38.07 | 104.12 | 2.73x |
| BenchmarkNinOptStructUnionProtoUnmarshal | 73.08 | 159.54 | 2.18x |
| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 94.00 | 185.92 | 1.98x |
| BenchmarkNinNestedStructUnionProtoUnmarshal | 65.35 | 133.75 | 2.05x |
| BenchmarkTreeProtoUnmarshal | 141.28 | 297.13 | 2.10x |
| BenchmarkOrBranchProtoUnmarshal | 162.56 | 313.96 | 1.93x |
| BenchmarkAndBranchProtoUnmarshal | 163.06 | 306.15 | 1.88x |
| BenchmarkLeafProtoUnmarshal | 176.72 | 354.19 | 2.00x |
| BenchmarkDeepTreeProtoUnmarshal | 107.50 | 205.30 | 1.91x |
| BenchmarkADeepBranchProtoUnmarshal | 83.48 | 190.88 | 2.29x |
| BenchmarkAndDeepBranchProtoUnmarshal | 110.97 | 234.60 | 2.11x |
| BenchmarkDeepLeafProtoUnmarshal | 123.40 | 250.73 | 2.03x |
| BenchmarkNidOptEnumProtoUnmarshal | 2.62 | 27.16 | 10.37x |
| BenchmarkNinOptEnumProtoUnmarshal | 1.31 | 6.11 | 4.66x |
| BenchmarkNidRepEnumProtoUnmarshal | 7.42 | 17.88 | 2.41x |
| BenchmarkNinRepEnumProtoUnmarshal | 7.47 | 16.69 | 2.23x |
| BenchmarkTimerProtoUnmarshal | 61.12 | 190.34 | 3.11x |
| BenchmarkMyExtendableProtoUnmarshal | 9.90 | 36.71 | 3.71x |
| BenchmarkOtherExtenableProtoUnmarshal | 21.90 | 52.13 | 2.38x |
\ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/custom_types.md b/vendor/github.com/gogo/protobuf/custom_types.md deleted file mode 100644 index 3eed249b56..0000000000 --- a/vendor/github.com/gogo/protobuf/custom_types.md +++ /dev/null @@ -1,68 +0,0 @@ -# Custom types - -Custom types is a gogo protobuf extensions that allows for using a custom -struct type to decorate the underlying structure of the protocol message. - -# How to use - -## Defining the protobuf message - -```proto -message CustomType { - optional ProtoType Field = 1 [(gogoproto.customtype) = "T"]; -} - -message ProtoType { - optional string Field = 1; -} -``` - -or alternatively you can declare the field type in the protocol message to be -`bytes`: - -```proto -message BytesCustomType { - optional bytes Field = 1 [(gogoproto.customtype) = "T"]; -} -``` - -The downside of using `bytes` is that it makes it harder to generate protobuf -code in other languages. In either case, it is the user responsibility to -ensure that the custom type marshals and unmarshals to the expected wire -format. That is, in the first example, gogo protobuf will not attempt to ensure -that the wire format of `ProtoType` and `T` are wire compatible. - -## Custom type method signatures - -The custom type must define the following methods with the given -signatures. Assuming the custom type is called `T`: - -```go -func (t T) Marshal() ([]byte, error) {} -func (t *T) MarshalTo(data []byte) (n int, err error) {} -func (t *T) Unmarshal(data []byte) error {} - -func (t T) MarshalJSON() ([]byte, error) {} -func (t *T) UnmarshalJSON(data []byte) error {} - -// only required if the compare option is set -func (t T) Compare(other T) int {} -// only required if the equal option is set -func (t T) Equal(other T) bool {} -// only required if populate option is set -func NewPopulatedT(r randyThetest) *T {} -``` - -Check [t.go](test/t.go) for a full example - -# Warnings and issues - -`Warning about customtype: It is your responsibility to test all cases of your marshaling, unmarshaling and size methods implemented for your custom type.` - -Issues with customtype include: - * A Bytes method is not allowed. - * Defining a customtype as a fake proto message is broken. - * proto.Clone is broken. - * Using a proto message as a customtype is not allowed. - * cusomtype of type map can not UnmarshalText - * customtype of type struct cannot jsonpb unmarshal diff --git a/vendor/github.com/gogo/protobuf/extensions.md b/vendor/github.com/gogo/protobuf/extensions.md deleted file mode 100644 index fcfe17af26..0000000000 --- a/vendor/github.com/gogo/protobuf/extensions.md +++ /dev/null @@ -1,162 +0,0 @@ -# gogoprotobuf Extensions - -Here is an [example.proto](https://github.com/gogo/protobuf/blob/master/test/example/example.proto) which uses most of the gogoprotobuf code generation plugins. - -Please also look at the example [Makefile](https://github.com/gogo/protobuf/blob/master/test/example/Makefile) which shows how to specify the `descriptor.proto` and `gogo.proto` in your proto_path - -The documentation at [http://godoc.org/github.com/gogo/protobuf/gogoproto](http://godoc.org/github.com/gogo/protobuf/gogoproto) describes the extensions made to goprotobuf in more detail. - -Also see [http://godoc.org/github.com/gogo/protobuf/plugin/](http://godoc.org/github.com/gogo/protobuf/plugin/) for documentation of each of the extensions which have their own plugins. 
# Fast Marshalling and Unmarshalling

Generating a `Marshal`, `MarshalTo`, `Size` (or `ProtoSize`) and `Unmarshal` method for a struct results in faster marshalling and unmarshalling than when using reflect.

See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md) for a comparison between reflect and generated code used for marshalling and unmarshalling.
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message | false |
| sizer | Message | bool | if true, a Size method is generated for the specific message | false |
| unmarshaler | Message | bool | if true, an Unmarshal method is generated for the specific message | false |
| protosizer | Message | bool | if true, a ProtoSize method is generated for the specific message | false |
| unsafe_marshaler (deprecated) | Message | bool | if true, a Marshal and MarshalTo method is generated. | false |
| unsafe_unmarshaler (deprecated) | Message | bool | if true, an Unmarshal method is generated. | false |
| stable_marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed | false |
| typedecl (beta) | Message | bool | if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
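For illustration, here is a rough, hand-written sketch of the method set these options add to a generated message. The `Timestamp` type and its single varint field are assumptions invented for this example, not output of protoc-gen-gogo; the point is the shape of the contract: straight-line encoding over the struct fields, with no reflection.

```go
package main

import "fmt"

// Toy message standing in for generated code: one uint64 field
// (field number 1, wire type 0 = varint).
type Timestamp struct {
	Seconds uint64
}

// Size mirrors the generated sizer: one tag byte plus the varint length.
func (m *Timestamp) Size() int {
	n := 1
	v := m.Seconds
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// MarshalTo mirrors the generated marshaler: it encodes into a
// caller-provided buffer, enabling buffer reuse.
func (m *Timestamp) MarshalTo(data []byte) (int, error) {
	i := 0
	data[i] = 0x08 // key for field 1, wire type varint
	i++
	v := m.Seconds
	for v >= 0x80 {
		data[i] = byte(v) | 0x80
		v >>= 7
		i++
	}
	data[i] = byte(v)
	return i + 1, nil
}

func (m *Timestamp) Marshal() ([]byte, error) {
	data := make([]byte, m.Size())
	n, err := m.MarshalTo(data)
	return data[:n], err
}

func (m *Timestamp) Unmarshal(data []byte) error {
	if len(data) < 2 || data[0] != 0x08 {
		return fmt.Errorf("unexpected wire data")
	}
	var v uint64
	for i, shift := 1, uint(0); i < len(data); i++ {
		v |= uint64(data[i]&0x7f) << shift
		if data[i] < 0x80 {
			break
		}
		shift += 7
	}
	m.Seconds = v
	return nil
}

func main() {
	in := &Timestamp{Seconds: 1257894000}
	data, _ := in.Marshal()
	out := &Timestamp{}
	_ = out.Unmarshal(data)
	fmt.Println(len(data), out.Seconds == in.Seconds) // 6 true
}
```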
# More Canonical Go Structures

Working with a goprotobuf struct often leads you to create a second struct that is easier to work with, together with a function to copy the values between the two structs.

You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a new struct.

`gogoprotobuf` tries to fix these problems with the nullable, embed, customtype, customname, casttype, castkey and castvalue field extensions.
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| nullable | Field | bool | if false, a field is generated without a pointer (see warning below). | true |
| embed | Field | bool | if true, the field is generated as an embedded field. | false |
| customtype | Field | string | It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128. For more information please refer to the CustomTypes document | goprotobuf type |
| customname (beta) | Field | string | Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. | goprotobuf field name |
| casttype (beta) | Field | string | Changes the generated field type. It assumes that this type is castable to the original goprotobuf field type. It currently does not support maps, structs or enums. | goprotobuf field type |
| castkey (beta) | Field | string | Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
| castvalue (beta) | Field | string | Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
| enum_customname (beta) | Enum | string | Sets the type name of an enum. If goproto_enum_prefix is enabled, this value will be used as a prefix when generating enum values. | goprotobuf enum type name. Helps with golint issues. |
| enumdecl (beta) | Enum | bool | if false, type declaration of the enum is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
| enumvalue_customname (beta) | Enum Value | string | Changes the generated enum name. Helps with golint issues. | goprotobuf enum value name |
| stdtime | Timestamp Field | bool | Changes the Well Known Timestamp Type to time.Time | Timestamp |
| stdduration | Duration Field | bool | Changes the Well Known Duration Type to time.Duration | Duration |
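As a sketch of what `nullable` changes in practice (the field `optional string name = 1` is an assumption for this example, not taken from a real .proto), the two generated struct shapes would roughly be:

```go
package main

import "fmt"

// Default goprotobuf shape: an optional scalar is a pointer,
// so "unset" is representable as nil.
type WithPointer struct {
	Name *string
}

// Rough shape with (gogoproto.nullable) = false: a plain value that is
// easier to use, but "unset" and "empty" can no longer be distinguished.
type WithoutPointer struct {
	Name string
}

func main() {
	a := WithPointer{}    // a.Name == nil: the field is unset
	b := WithoutPointer{} // b.Name == "": unset, or deliberately empty?
	fmt.Println(a.Name == nil, b.Name == "")
}
```

This is exactly the trade-off the warning that follows describes.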
`Warning about nullable: according to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set.`

# Goprotobuf Compatibility

Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers (see the section on tests below).

Gogoprotobuf generates the same code as goprotobuf if no extensions are used.

The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf.
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| gogoproto_import | File | bool | if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. | true |
| goproto_enum_prefix | Enum | bool | if false, generates the enum constant names without the messagetype prefix | true |
| goproto_getters | Message | bool | if false, the message is generated without get methods, this is useful when you would rather want to use face | true |
| goproto_stringer | Message | bool | if false, the message is generated without the default string method, this is useful for rather using stringer | true |
| goproto_enum_stringer (experimental) | Enum | bool | if false, the enum is generated without the default string method, this is useful for rather using enum_stringer | true |
| goproto_extensions_map (beta) | Message | bool | if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension | true |
| goproto_unrecognized (beta) | Message | bool | if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. | true |
| goproto_registration (beta) | File | bool | if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). | false |
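To make the `goproto_getters` trade-off concrete, here is a hand-written sketch of the nil-safe getter that goprotobuf normally emits and that `goproto_getters=false` suppresses. The `Test` message and its field are hypothetical, invented for this example:

```go
package main

import "fmt"

// Test stands in for a generated message with an optional field.
type Test struct {
	Label *string
}

// GetLabel mimics the generated getter: safe to call on a nil receiver
// and on an unset field, returning the zero value instead of panicking.
func (m *Test) GetLabel() string {
	if m != nil && m.Label != nil {
		return *m.Label
	}
	return ""
}

func main() {
	var m *Test                      // nil message
	fmt.Printf("%q\n", m.GetLabel()) // ""
}
```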
# Less Typing

The Protocol Buffer language is very parseable and extra code can be easily generated for structures.

Helper methods, functions and interfaces can be generated by triggering certain extensions like gostring.
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| gostring | Message | bool | if true, a `GoString` method is generated. This returns a string representing valid go code to reproduce the current state of the struct. | false |
| onlyone (deprecated) | Message | bool | if true, all fields must be nullable and only one of the fields may be set, like a union. Two methods are generated: `GetValue() interface{}` and `SetValue(v interface{}) (set bool)`. These provide easier interaction with a union. | false |
| equal | Message | bool | if true, an Equal method is generated | false |
| compare | Message | bool | if true, a Compare method is generated. This is very useful for quickly implementing sort on a list of protobuf structs | false |
| verbose_equal | Message | bool | if true, a verbose equal method is generated for the message. This returns an error which describes the exact element which is not equal to the exact element in the other struct. | false |
| stringer | Message | bool | if true, a String method is generated for the message. | false |
| face | Message | bool | if true, a function will be generated which can convert a structure which satisfies an interface (face) to the specified structure. This interface contains getters for each of the fields in the struct. The specified struct is also generated with the getters. This allows it to satisfy its own face. | false |
| description (beta) | Message | bool | if true, a Description method is generated for the message. | false |
| populate | Message | bool | if true, a `NewPopulated` function is generated. This is necessary for generated tests. | false |
| enum_stringer (experimental) | Enum | bool | if true, a String method is generated for an Enum | false |
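As an example of the kind of helper this buys you, a generated `Compare` makes sorting trivial. The sketch below hand-writes the method the `compare` plugin would generate for a hypothetical one-field message (`Item` is an assumption for this example):

```go
package main

import (
	"fmt"
	"sort"
)

// Item stands in for a generated message.
type Item struct {
	ID int64
}

// Compare mimics the generated method: negative, zero or positive,
// in the style of bytes.Compare.
func (m *Item) Compare(other *Item) int {
	switch {
	case m.ID < other.ID:
		return -1
	case m.ID > other.ID:
		return 1
	}
	return 0
}

func main() {
	items := []*Item{{ID: 3}, {ID: 1}, {ID: 2}}
	sort.Slice(items, func(i, j int) bool {
		return items[i].Compare(items[j]) < 0
	})
	fmt.Println(items[0].ID, items[1].ID, items[2].ID) // 1 2 3
}
```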
Issues with Compare include:

 * Oneof is not supported yet
 * Not all Well Known Types are supported yet
 * Maps are not supported

# Peace of Mind

Test and Benchmark generation is done with the following extensions:
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| testgen | Message | bool | if true, tests are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
| benchgen | Message | bool | if true, benchmarks are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
# More Serialization Formats

Other serialization formats like xml and json typically use reflect to marshal and unmarshal structured data. Manipulating these structs into something other than the default Go requires editing tags. The following extensions provide ways of editing these tags for the generated protobuf structs.
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| jsontag (beta) | Field | string | if set, the json tag value between the double quotes is replaced with this string | fieldname |
| moretags (beta) | Field | string | if set, this string is appended to the tag string | empty |
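To illustrate what these tags change, the sketch below compares a struct with default generated tags against one where a hypothetical `jsontag` of `identifier` replaced the JSON key and a `moretags` value appended an `xml` tag. The tags shown are assumptions about the generated output, written in the usual goprotobuf style:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Default generated tags: the JSON key is the field name.
type WithDefaults struct {
	Id int64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
}

// Assumed result of jsontag = "identifier" plus moretags appending xml:"id".
type WithTags struct {
	Id int64 `protobuf:"varint,1,opt,name=id" json:"identifier" xml:"id"`
}

func main() {
	a, _ := json.Marshal(WithDefaults{Id: 7})
	b, _ := json.Marshal(WithTags{Id: 7})
	fmt.Println(string(a)) // {"id":7}
	fmt.Println(string(b)) // {"identifier":7}
}
```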
- -Here is a longer explanation of jsontag and moretags - -# File Options - -Each of the boolean message and enum extensions also have a file extension: - - * `marshaler_all` - * `sizer_all` - * `protosizer_all` - * `unmarshaler_all` - * `unsafe_marshaler_all` - * `unsafe_unmarshaler_all` - * `stable_marshaler_all` - * `goproto_enum_prefix_all` - * `goproto_getters_all` - * `goproto_stringer_all` - * `goproto_enum_stringer_all` - * `goproto_extensions_map_all` - * `goproto_unrecognized_all` - * `gostring_all` - * `onlyone_all` - * `equal_all` - * `compare_all` - * `verbose_equal_all` - * `stringer_all` - * `enum_stringer_all` - * `face_all` - * `description_all` - * `populate_all` - * `testgen_all` - * `benchgen_all` - * `enumdecl_all` - * `typedecl_all` - -Each of these are the same as their Message Option counterparts, except they apply to all messages in the file. Their Message option counterparts can also be used to overwrite their effect. - -# Tests - - * The normal barrage of tests are run with: `make tests` - * A few weird tests: `make testall` - * Tests for compatibility with [golang/protobuf](https://github.com/golang/protobuf) are handled by a different project [harmonytests](https://github.com/gogo/harmonytests), since it requires goprotobuf. - * Cross version tests are made with [Travis CI](https://travis-ci.org/gogo/protobuf). - * GRPC Tests are also handled by a different project [grpctests](https://github.com/gogo/grpctests), since it depends on a lot of grpc libraries. - * Thanks to [go-fuzz](https://github.com/dvyukov/go-fuzz/) we have proper [fuzztests](https://github.com/gogo/fuzztests). - diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile index 02f9c62c2c..0b4659b731 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -28,7 +28,7 @@ regenerate: go install github.com/gogo/protobuf/protoc-gen-gogo - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto restore: cp gogo.pb.golden gogo.pb.go diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index 147b5ecc62..081c86fa8e 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -162,7 +162,7 @@ The most complete way to see examples is to look at github.com/gogo/protobuf/test/thetest.proto Gogoprototest is a seperate project, -because we want to keep gogoprotobuf independant of goprotobuf, +because we want to keep gogoprotobuf independent of goprotobuf, but we still want to test it thoroughly. */ diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index fa88040f11..0057f8e1bf 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -1,20 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: gogo.proto -/* -Package gogoproto is a generated protocol buffer package. 
- -It is generated from these files: - gogo.proto - -It has these top-level messages: -*/ -package gogoproto +package gogoproto // import "github.com/gogo/protobuf/gogoproto" import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -28,7 +20,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62001, Name: "gogoproto.goproto_enum_prefix", @@ -37,7 +29,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62021, Name: "gogoproto.goproto_enum_stringer", @@ -46,7 +38,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ } var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62022, Name: "gogoproto.enum_stringer", @@ -55,7 +47,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ } var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*string)(nil), Field: 62023, Name: "gogoproto.enum_customname", @@ -64,7 +56,7 @@ var E_EnumCustomname = &proto.ExtensionDesc{ } var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62024, Name: "gogoproto.enumdecl", @@ -73,7 +65,7 @@ var E_Enumdecl = &proto.ExtensionDesc{ } var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumValueOptions)(nil), + ExtendedType: (*descriptor.EnumValueOptions)(nil), ExtensionType: (*string)(nil), Field: 66001, Name: "gogoproto.enumvalue_customname", @@ -82,7 +74,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ } var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63001, Name: "gogoproto.goproto_getters_all", @@ -91,7 +83,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", @@ -100,7 +92,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ } var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63003, Name: "gogoproto.goproto_stringer_all", @@ -109,7 +101,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ } var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63004, Name: "gogoproto.verbose_equal_all", @@ 
-118,7 +110,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ } var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63005, Name: "gogoproto.face_all", @@ -127,7 +119,7 @@ var E_FaceAll = &proto.ExtensionDesc{ } var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63006, Name: "gogoproto.gostring_all", @@ -136,7 +128,7 @@ var E_GostringAll = &proto.ExtensionDesc{ } var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63007, Name: "gogoproto.populate_all", @@ -145,7 +137,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ } var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63008, Name: "gogoproto.stringer_all", @@ -154,7 +146,7 @@ var E_StringerAll = &proto.ExtensionDesc{ } var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63009, Name: "gogoproto.onlyone_all", @@ -163,7 +155,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ } var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63013, Name: "gogoproto.equal_all", @@ -172,7 +164,7 @@ var E_EqualAll = &proto.ExtensionDesc{ } var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63014, Name: "gogoproto.description_all", @@ -181,7 +173,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ } var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63015, Name: "gogoproto.testgen_all", @@ -190,7 +182,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ } var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63016, Name: "gogoproto.benchgen_all", @@ -199,7 +191,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ } var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63017, Name: "gogoproto.marshaler_all", @@ -208,7 +200,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ } var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63018, Name: "gogoproto.unmarshaler_all", @@ -217,7 +209,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ } var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63019, Name: "gogoproto.stable_marshaler_all", @@ -226,7 +218,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ } var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: 
(*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63020, Name: "gogoproto.sizer_all", @@ -235,7 +227,7 @@ var E_SizerAll = &proto.ExtensionDesc{ } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", @@ -244,7 +236,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ } var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63022, Name: "gogoproto.enum_stringer_all", @@ -253,7 +245,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ } var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63023, Name: "gogoproto.unsafe_marshaler_all", @@ -262,7 +254,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", @@ -271,7 +263,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63025, Name: "gogoproto.goproto_extensions_map_all", @@ -280,7 +272,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63026, Name: "gogoproto.goproto_unrecognized_all", @@ -289,7 +281,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ } var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63027, Name: "gogoproto.gogoproto_import", @@ -298,7 +290,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ } var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63028, Name: "gogoproto.protosizer_all", @@ -307,7 +299,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ } var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63029, Name: "gogoproto.compare_all", @@ -316,7 +308,7 @@ var E_CompareAll = &proto.ExtensionDesc{ } var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63030, Name: "gogoproto.typedecl_all", @@ -325,7 +317,7 @@ var E_TypedeclAll = &proto.ExtensionDesc{ } var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63031, Name: "gogoproto.enumdecl_all", @@ -334,7 +326,7 @@ var E_EnumdeclAll = &proto.ExtensionDesc{ } var E_GoprotoRegistration = &proto.ExtensionDesc{ 
- ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63032, Name: "gogoproto.goproto_registration", @@ -342,8 +334,35 @@ var E_GoprotoRegistration = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all,json=goprotoSizecacheAll", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all,json=goprotoUnkeyedAll", + Filename: "gogo.proto", +} + var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64001, Name: "gogoproto.goproto_getters", @@ -352,7 +371,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ } var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64003, Name: "gogoproto.goproto_stringer", @@ -361,7 +380,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ } var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64004, Name: "gogoproto.verbose_equal", @@ -370,7 +389,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ } var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64005, Name: "gogoproto.face", @@ -379,7 +398,7 @@ var E_Face = &proto.ExtensionDesc{ } var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64006, Name: "gogoproto.gostring", @@ -388,7 +407,7 @@ var E_Gostring = &proto.ExtensionDesc{ } var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64007, Name: "gogoproto.populate", @@ -397,7 +416,7 @@ var E_Populate = &proto.ExtensionDesc{ } var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 67008, Name: "gogoproto.stringer", @@ -406,7 +425,7 @@ var E_Stringer = &proto.ExtensionDesc{ } var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64009, Name: "gogoproto.onlyone", @@ -415,7 +434,7 @@ var E_Onlyone = &proto.ExtensionDesc{ } var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), 
ExtensionType: (*bool)(nil), Field: 64013, Name: "gogoproto.equal", @@ -424,7 +443,7 @@ var E_Equal = &proto.ExtensionDesc{ } var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64014, Name: "gogoproto.description", @@ -433,7 +452,7 @@ var E_Description = &proto.ExtensionDesc{ } var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64015, Name: "gogoproto.testgen", @@ -442,7 +461,7 @@ var E_Testgen = &proto.ExtensionDesc{ } var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64016, Name: "gogoproto.benchgen", @@ -451,7 +470,7 @@ var E_Benchgen = &proto.ExtensionDesc{ } var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64017, Name: "gogoproto.marshaler", @@ -460,7 +479,7 @@ var E_Marshaler = &proto.ExtensionDesc{ } var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64018, Name: "gogoproto.unmarshaler", @@ -469,7 +488,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ } var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64019, Name: "gogoproto.stable_marshaler", @@ -478,7 +497,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ } var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64020, Name: "gogoproto.sizer", @@ -487,7 +506,7 @@ var E_Sizer = &proto.ExtensionDesc{ } var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64023, Name: "gogoproto.unsafe_marshaler", @@ -496,7 +515,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64024, Name: "gogoproto.unsafe_unmarshaler", @@ -505,7 +524,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64025, Name: "gogoproto.goproto_extensions_map", @@ -514,7 +533,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64026, Name: "gogoproto.goproto_unrecognized", @@ -523,7 +542,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ } var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64028, Name: 
"gogoproto.protosizer", @@ -532,7 +551,7 @@ var E_Protosizer = &proto.ExtensionDesc{ } var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64029, Name: "gogoproto.compare", @@ -541,7 +560,7 @@ var E_Compare = &proto.ExtensionDesc{ } var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64030, Name: "gogoproto.typedecl", @@ -549,8 +568,35 @@ var E_Typedecl = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache,json=goprotoSizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed,json=goprotoUnkeyed", + Filename: "gogo.proto", +} + var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65001, Name: "gogoproto.nullable", @@ -559,7 +605,7 @@ var E_Nullable = &proto.ExtensionDesc{ } var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65002, Name: "gogoproto.embed", @@ -568,7 +614,7 @@ var E_Embed = &proto.ExtensionDesc{ } var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65003, Name: "gogoproto.customtype", @@ -577,7 +623,7 @@ var E_Customtype = &proto.ExtensionDesc{ } var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65004, Name: "gogoproto.customname", @@ -586,7 +632,7 @@ var E_Customname = &proto.ExtensionDesc{ } var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65005, Name: "gogoproto.jsontag", @@ -595,7 +641,7 @@ var E_Jsontag = &proto.ExtensionDesc{ } var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65006, Name: "gogoproto.moretags", @@ -604,7 +650,7 @@ var E_Moretags = &proto.ExtensionDesc{ } var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65007, Name: "gogoproto.casttype", @@ -613,7 +659,7 @@ var E_Casttype = &proto.ExtensionDesc{ } var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), 
ExtensionType: (*string)(nil), Field: 65008, Name: "gogoproto.castkey", @@ -622,7 +668,7 @@ var E_Castkey = &proto.ExtensionDesc{ } var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65009, Name: "gogoproto.castvalue", @@ -631,7 +677,7 @@ var E_Castvalue = &proto.ExtensionDesc{ } var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65010, Name: "gogoproto.stdtime", @@ -640,7 +686,7 @@ var E_Stdtime = &proto.ExtensionDesc{ } var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65011, Name: "gogoproto.stdduration", @@ -648,6 +694,15 @@ var E_Stdduration = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + func init() { proto.RegisterExtension(E_GoprotoEnumPrefix) proto.RegisterExtension(E_GoprotoEnumStringer) @@ -684,6 +739,9 @@ func init() { proto.RegisterExtension(E_TypedeclAll) proto.RegisterExtension(E_EnumdeclAll) proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -707,6 +765,9 @@ func init() { proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -718,86 +779,94 @@ func init() { proto.RegisterExtension(E_Castvalue) proto.RegisterExtension(E_Stdtime) proto.RegisterExtension(E_Stdduration) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } - -var fileDescriptorGogo = []byte{ - // 1201 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xcb, 0x6f, 0x1c, 0x45, - 0x13, 0xc0, 0xf5, 0xe9, 0x73, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, - 0xe4, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x11, 0x06, 0x63, 0xe2, 0x00, 0xe2, - 0xb0, 0x9a, 0xdd, 0x6d, 0x4f, 0x06, 0x66, 0xa6, 0x87, 0x99, 0x9e, 0x28, 0xce, 0x0d, 0x85, 0x87, - 0x10, 0xe2, 0x8d, 0x04, 0x09, 0x49, 0x80, 0x03, 0xef, 0x67, 0x78, 0x1f, 0xb9, 0xf0, 0xb8, 0xf2, - 0x3f, 0x70, 0x01, 0xcc, 0xdb, 0x37, 0x5f, 0x50, 0xcd, 0x56, 0xcd, 0xf6, 0xac, 0x57, 0xea, 0xde, - 0xdb, 0xec, 0xba, 0x7f, 0xbf, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x31, 0x80, 0xaf, 0x7c, 0x35, 0x97, - 0xa4, 0x4a, 0xab, 0x7a, 0x0d, 0xaf, 0x8b, 0xcb, 0x03, 0x07, 0x7d, 0xa5, 0xfc, 0x50, 0x1e, 0x2e, - 0x3e, 0x35, 0xf3, 0xcd, 0xc3, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, - 0xc1, 0x34, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0x37, 0x83, 0xf3, 0xf5, 0x5b, 0xe6, - 0x3a, 0xe4, 0x1c, 0x93, 0x73, 0xcb, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0xfe, 0xeb, - 0x3f, 0xff, 0xff, 
0xe0, 0xff, 0x6e, 0x1f, 0x59, 0x9f, 0x22, 0x14, 0xff, 0xb6, 0x56, 0x80, 0x62, - 0x1d, 0x6e, 0xac, 0xf8, 0x32, 0x9d, 0x06, 0xb1, 0x2f, 0x53, 0x8b, 0xf1, 0x3b, 0x32, 0x4e, 0x1b, - 0xc6, 0x7b, 0x09, 0x15, 0x4b, 0x30, 0x3e, 0x88, 0xeb, 0x7b, 0x72, 0x8d, 0x49, 0x53, 0xb2, 0x02, - 0x93, 0x85, 0xa4, 0x95, 0x67, 0x5a, 0x45, 0xb1, 0x17, 0x49, 0x8b, 0xe6, 0x87, 0x42, 0x53, 0x5b, - 0x9f, 0x40, 0x6c, 0xa9, 0xa4, 0x84, 0x80, 0x11, 0xfc, 0xa6, 0x2d, 0x5b, 0xa1, 0xc5, 0xf0, 0x23, - 0x05, 0x52, 0xae, 0x17, 0x67, 0x60, 0x06, 0xaf, 0xcf, 0x79, 0x61, 0x2e, 0xcd, 0x48, 0x0e, 0xf5, - 0xf5, 0x9c, 0xc1, 0x65, 0x2c, 0xfb, 0xe9, 0xe2, 0x50, 0x11, 0xce, 0x74, 0x29, 0x30, 0x62, 0x32, - 0xaa, 0xe8, 0x4b, 0xad, 0x65, 0x9a, 0x35, 0xbc, 0xb0, 0x5f, 0x78, 0x27, 0x82, 0xb0, 0x34, 0x5e, - 0xda, 0xae, 0x56, 0x71, 0xa5, 0x43, 0x2e, 0x86, 0xa1, 0xd8, 0x80, 0x9b, 0xfa, 0x3c, 0x15, 0x0e, - 0xce, 0xcb, 0xe4, 0x9c, 0xd9, 0xf3, 0x64, 0xa0, 0x76, 0x0d, 0xf8, 0xfb, 0xb2, 0x96, 0x0e, 0xce, - 0xd7, 0xc8, 0x59, 0x27, 0x96, 0x4b, 0x8a, 0xc6, 0x53, 0x30, 0x75, 0x4e, 0xa6, 0x4d, 0x95, 0xc9, - 0x86, 0x7c, 0x24, 0xf7, 0x42, 0x07, 0xdd, 0x15, 0xd2, 0x4d, 0x12, 0xb8, 0x8c, 0x1c, 0xba, 0x8e, - 0xc2, 0xc8, 0xa6, 0xd7, 0x92, 0x0e, 0x8a, 0xab, 0xa4, 0x18, 0xc6, 0xf5, 0x88, 0x2e, 0xc2, 0x98, - 0xaf, 0x3a, 0xb7, 0xe4, 0x80, 0x5f, 0x23, 0x7c, 0x94, 0x19, 0x52, 0x24, 0x2a, 0xc9, 0x43, 0x4f, - 0xbb, 0x44, 0xf0, 0x3a, 0x2b, 0x98, 0x21, 0xc5, 0x00, 0x69, 0x7d, 0x83, 0x15, 0x99, 0x91, 0xcf, - 0x05, 0x18, 0x55, 0x71, 0xb8, 0xa5, 0x62, 0x97, 0x20, 0xde, 0x24, 0x03, 0x10, 0x82, 0x82, 0x79, - 0xa8, 0xb9, 0x16, 0xe2, 0xad, 0x6d, 0xde, 0x1e, 0x5c, 0x81, 0x15, 0x98, 0xe4, 0x06, 0x15, 0xa8, - 0xd8, 0x41, 0xf1, 0x36, 0x29, 0x26, 0x0c, 0x8c, 0x6e, 0x43, 0xcb, 0x4c, 0xfb, 0xd2, 0x45, 0xf2, - 0x0e, 0xdf, 0x06, 0x21, 0x94, 0xca, 0xa6, 0x8c, 0x5b, 0x67, 0xdd, 0x0c, 0xef, 0x72, 0x2a, 0x99, - 0x41, 0xc5, 0x12, 0x8c, 0x47, 0x5e, 0x9a, 0x9d, 0xf5, 0x42, 0xa7, 0x72, 0xbc, 0x47, 0x8e, 0xb1, - 0x12, 0xa2, 0x8c, 0xe4, 0xf1, 0x20, 0x9a, 0xf7, 0x39, 0x23, 0x06, 0x46, 0x5b, 0x2f, 0xd3, 0x5e, - 0x33, 0x94, 0x8d, 0x41, 0x6c, 0x1f, 0xf0, 0xd6, 0xeb, 0xb0, 0xab, 0xa6, 0x71, 0x1e, 0x6a, 0x59, - 0x70, 0xc1, 0x49, 0xf3, 0x21, 0x57, 0xba, 0x00, 0x10, 0x7e, 0x00, 0x6e, 0xee, 0x3b, 0x26, 0x1c, - 0x64, 0x1f, 0x91, 0x6c, 0xb6, 0xcf, 0xa8, 0xa0, 0x96, 0x30, 0xa8, 0xf2, 0x63, 0x6e, 0x09, 0xb2, - 0xc7, 0xb5, 0x06, 0x33, 0x79, 0x9c, 0x79, 0x9b, 0x83, 0x65, 0xed, 0x13, 0xce, 0x5a, 0x87, 0xad, - 0x64, 0xed, 0x34, 0xcc, 0x92, 0x71, 0xb0, 0xba, 0x7e, 0xca, 0x8d, 0xb5, 0x43, 0x6f, 0x54, 0xab, - 0xfb, 0x20, 0x1c, 0x28, 0xd3, 0x79, 0x5e, 0xcb, 0x38, 0x43, 0xa6, 0x11, 0x79, 0x89, 0x83, 0xf9, - 0x3a, 0x99, 0xb9, 0xe3, 0x2f, 0x97, 0x82, 0x55, 0x2f, 0x41, 0xf9, 0xfd, 0xb0, 0x9f, 0xe5, 0x79, - 0x9c, 0xca, 0x96, 0xf2, 0xe3, 0xe0, 0x82, 0x6c, 0x3b, 0xa8, 0x3f, 0xeb, 0x29, 0xd5, 0x86, 0x81, - 0xa3, 0xf9, 0x24, 0xdc, 0x50, 0x9e, 0x55, 0x1a, 0x41, 0x94, 0xa8, 0x54, 0x5b, 0x8c, 0x9f, 0x73, - 0xa5, 0x4a, 0xee, 0x64, 0x81, 0x89, 0x65, 0x98, 0x28, 0x3e, 0xba, 0x3e, 0x92, 0x5f, 0x90, 0x68, - 0xbc, 0x4b, 0x51, 0xe3, 0x68, 0xa9, 0x28, 0xf1, 0x52, 0x97, 0xfe, 0xf7, 0x25, 0x37, 0x0e, 0x42, - 0xa8, 0x71, 0xe8, 0xad, 0x44, 0xe2, 0xb4, 0x77, 0x30, 0x7c, 0xc5, 0x8d, 0x83, 0x19, 0x52, 0xf0, - 0x81, 0xc1, 0x41, 0xf1, 0x35, 0x2b, 0x98, 0x41, 0xc5, 0x3d, 0xdd, 0x41, 0x9b, 0x4a, 0x3f, 0xc8, - 0x74, 0xea, 0xe1, 0x6a, 0x8b, 0xea, 0x9b, 0xed, 0xea, 0x21, 0x6c, 0xdd, 0x40, 0xc5, 0x29, 0x98, - 0xec, 0x39, 0x62, 0xd4, 0x6f, 0xdb, 0x63, 0x5b, 0x95, 0x59, 0xe6, 0xf9, 0xa5, 0xf0, 0xd1, 0x1d, - 0x6a, 0x46, 0xd5, 0x13, 0x86, 0xb8, 0x13, 
0xeb, 0x5e, 0x3d, 0x07, 0xd8, 0x65, 0x17, 0x77, 0xca, - 0xd2, 0x57, 0x8e, 0x01, 0xe2, 0x04, 0x8c, 0x57, 0xce, 0x00, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x33, - 0x8f, 0x00, 0xe2, 0x08, 0x0c, 0xe1, 0x3c, 0xb7, 0xe3, 0x8f, 0x13, 0x5e, 0x2c, 0x17, 0xc7, 0x60, - 0x84, 0xe7, 0xb8, 0x1d, 0x7d, 0x82, 0xd0, 0x12, 0x41, 0x9c, 0x67, 0xb8, 0x1d, 0x7f, 0x92, 0x71, - 0x46, 0x10, 0x77, 0x4f, 0xe1, 0xb7, 0x4f, 0x0f, 0x51, 0x1f, 0xe6, 0xdc, 0xcd, 0xc3, 0x30, 0x0d, - 0x6f, 0x3b, 0xfd, 0x14, 0xfd, 0x38, 0x13, 0xe2, 0x0e, 0xd8, 0xe7, 0x98, 0xf0, 0x67, 0x08, 0xed, - 0xac, 0x17, 0x4b, 0x30, 0x6a, 0x0c, 0x6c, 0x3b, 0xfe, 0x2c, 0xe1, 0x26, 0x85, 0xa1, 0xd3, 0xc0, - 0xb6, 0x0b, 0x9e, 0xe3, 0xd0, 0x89, 0xc0, 0xb4, 0xf1, 0xac, 0xb6, 0xd3, 0xcf, 0x73, 0xd6, 0x19, - 0x11, 0x0b, 0x50, 0x2b, 0xfb, 0xaf, 0x9d, 0x7f, 0x81, 0xf8, 0x2e, 0x83, 0x19, 0x30, 0xfa, 0xbf, - 0x5d, 0xf1, 0x22, 0x67, 0xc0, 0xa0, 0x70, 0x1b, 0xf5, 0xce, 0x74, 0xbb, 0xe9, 0x25, 0xde, 0x46, - 0x3d, 0x23, 0x1d, 0xab, 0x59, 0xb4, 0x41, 0xbb, 0xe2, 0x65, 0xae, 0x66, 0xb1, 0x1e, 0xc3, 0xe8, - 0x1d, 0x92, 0x76, 0xc7, 0x2b, 0x1c, 0x46, 0xcf, 0x8c, 0x14, 0x6b, 0x50, 0xdf, 0x3b, 0x20, 0xed, - 0xbe, 0x57, 0xc9, 0x37, 0xb5, 0x67, 0x3e, 0x8a, 0xfb, 0x60, 0xb6, 0xff, 0x70, 0xb4, 0x5b, 0x2f, - 0xed, 0xf4, 0xbc, 0xce, 0x98, 0xb3, 0x51, 0x9c, 0xee, 0x76, 0x59, 0x73, 0x30, 0xda, 0xb5, 0x97, - 0x77, 0xaa, 0x8d, 0xd6, 0x9c, 0x8b, 0x62, 0x11, 0xa0, 0x3b, 0x93, 0xec, 0xae, 0x2b, 0xe4, 0x32, - 0x20, 0xdc, 0x1a, 0x34, 0x92, 0xec, 0xfc, 0x55, 0xde, 0x1a, 0x44, 0xe0, 0xd6, 0xe0, 0x69, 0x64, - 0xa7, 0xaf, 0xf1, 0xd6, 0x60, 0x44, 0xcc, 0xc3, 0x48, 0x9c, 0x87, 0x21, 0x3e, 0x5b, 0xf5, 0x5b, - 0xfb, 0x8c, 0x1b, 0x19, 0xb6, 0x19, 0xfe, 0x65, 0x97, 0x60, 0x06, 0xc4, 0x11, 0xd8, 0x27, 0xa3, - 0xa6, 0x6c, 0xdb, 0xc8, 0x5f, 0x77, 0xb9, 0x9f, 0xe0, 0x6a, 0xb1, 0x00, 0xd0, 0x79, 0x99, 0xc6, - 0x28, 0x6c, 0xec, 0x6f, 0xbb, 0x9d, 0xf7, 0x7a, 0x03, 0xe9, 0x0a, 0x8a, 0xb7, 0x71, 0x8b, 0x60, - 0xbb, 0x2a, 0x28, 0x5e, 0xc0, 0x8f, 0xc2, 0xf0, 0x43, 0x99, 0x8a, 0xb5, 0xe7, 0xdb, 0xe8, 0xdf, - 0x89, 0xe6, 0xf5, 0x98, 0xb0, 0x48, 0xa5, 0x52, 0x7b, 0x7e, 0x66, 0x63, 0xff, 0x20, 0xb6, 0x04, - 0x10, 0x6e, 0x79, 0x99, 0x76, 0xb9, 0xef, 0x3f, 0x19, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x87, 0xe5, - 0x96, 0x8d, 0xfd, 0x8b, 0x83, 0xa6, 0xf5, 0xe2, 0x18, 0xd4, 0xf0, 0xb2, 0xf8, 0x3f, 0x84, 0x0d, - 0xfe, 0x9b, 0xe0, 0x2e, 0x81, 0xbf, 0x9c, 0xe9, 0xb6, 0x0e, 0xec, 0xc9, 0xfe, 0x87, 0x2a, 0xcd, - 0xeb, 0xc5, 0x22, 0x8c, 0x66, 0xba, 0xdd, 0xce, 0xe9, 0x44, 0x63, 0xc1, 0xff, 0xdd, 0x2d, 0x5f, - 0x72, 0x4b, 0xe6, 0xf8, 0x21, 0x98, 0x6e, 0xa9, 0xa8, 0x17, 0x3c, 0x0e, 0x2b, 0x6a, 0x45, 0xad, - 0x15, 0xbb, 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x9c, 0xec, 0xd8, 0x50, 0x13, 0x00, - 0x00, + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_b95f77e237336c7c) } + +var fileDescriptor_gogo_b95f77e237336c7c = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 
0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 
0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 
0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index fbca44cd48..b80c85653f 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -33,6 +33,7 @@ import "google/protobuf/descriptor.proto"; option java_package = "com.google.protobuf"; option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; extend google.protobuf.EnumOptions { optional bool goproto_enum_prefix = 62001; @@ -82,6 +83,10 @@ extend google.protobuf.FileOptions { optional bool enumdecl_all = 63031; optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; } extend google.protobuf.MessageOptions { @@ -114,6 +119,11 @@ extend google.protobuf.MessageOptions { optional bool compare = 64029; optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; } extend google.protobuf.FieldOptions { @@ -129,4 +139,6 @@ extend google.protobuf.FieldOptions { optional bool stdtime = 65010; optional bool stdduration = 65011; + optional bool wktpointer = 65012; + } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 6b851c5623..390d4e4be6 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -47,6 +47,55 @@ func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Stdduration, false) } +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, 
false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -334,9 +383,6 @@ func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google } func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - if IsProto3(file) { - return false - } return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) } @@ -355,3 +401,15 @@ func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_proto func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) } + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/install-protobuf.sh b/vendor/github.com/gogo/protobuf/install-protobuf.sh deleted file mode 100755 index fc40642e40..0000000000 --- a/vendor/github.com/gogo/protobuf/install-protobuf.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -die() { - echo "$@" >&2 - exit 1 -} - -cd /home/travis - -case "$PROTOBUF_VERSION" in -2*) - basename=protobuf-$PROTOBUF_VERSION - wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz - tar xzf $basename.tar.gz - cd protobuf-$PROTOBUF_VERSION - ./configure --prefix=/home/travis && make -j2 && make install - ;; -3*) - basename=protoc-$PROTOBUF_VERSION-linux-x86_64 - wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.zip - unzip $basename.zip - ;; -*) - die "unknown protobuf version: $PROTOBUF_VERSION" - ;; -esac - - - - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile index 
41c717573e..00d65f3277 100644 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -38,6 +38,6 @@ test: install generate-test-pbs generate-test-pbs: make install - make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto + make -C test_proto + make -C proto3_proto make diff --git a/vendor/github.com/gogo/protobuf/proto/all_test.go b/vendor/github.com/gogo/protobuf/proto/all_test.go deleted file mode 100644 index b5f8709d85..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/all_test.go +++ /dev/null @@ -1,2278 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/gogo/protobuf/proto" - . "github.com/gogo/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - errType reflect.Type - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since the Marshal method returned bytes, they should be written to the - // buffer. (For efficiency, we assume that Marshal implementations are - // always correct w.r.t. RequiredNotSetError and output.) - want: []byte{5, 6, 7}, - errType: reflect.TypeOf(errors.New("some marshal err")), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. 
- want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - errType: reflect.TypeOf(&RequiredNotSetError{}), - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if reflect.TypeOf(err) != test.errType { - t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - if size := Size(test.m); size != len(b.Bytes()) { - t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) - } - - m, mErr := Marshal(test.m) - if !bytes.Equal(b.Bytes(), m) { - t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) - } - if !reflect.DeepEqual(err, mErr) { - t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", - test.name, fmt.Sprint(mErr), fmt.Sprint(err)) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. 
-func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? -func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
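Every hex string in the TestEncodeDecode cases above is assembled from the same wire-format scheme: each field begins with a varint key equal to field_number<<3 | wire_type, and the sint fields are zigzag-encoded. A dependency-free sketch that reproduces a few of the commented values (all names here are illustrative, not from the library):

package main

import "fmt"

// varint encodes x in base-128 little-endian with the high bit of each
// byte used as a continuation flag.
func varint(x uint64) []byte {
	var b []byte
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

// key returns the varint-encoded field key: field number shifted left three
// bits, ORed with the wire type (0 varint, 1 fixed64, 2 bytes, 5 fixed32).
func key(field, wire uint64) []byte {
	return varint(field<<3 | wire)
}

// zigzag32 maps signed to unsigned so small negatives stay small:
// 0->0, -1->1, 1->2, -32->63 (0x3f), matching the "0x3f zigzag32" comments.
func zigzag32(v int32) uint64 {
	return uint64(uint32(v)<<1 ^ uint32(v>>31))
}

func main() {
	fmt.Printf("%x\n", key(1, 0))     // 08: "0807" is field 1, varint, value 7
	fmt.Printf("%x\n", key(102, 0))   // b006: the "b0063f" entry is field 102
	fmt.Printf("%x\n", zigzag32(-32)) // 3f
}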
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err = Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
-	igtf := initGoTestField()
-	igtrg := initGoTest_RepeatedGroup()
-	for i := 0; i < N; i++ {
-		pb.Repeatedgroup[i] = igtrg
-		pb.F_Sint64Repeated[i] = int64(i)
-		pb.F_Sint32Repeated[i] = int32(i)
-		s := fmt.Sprint(i)
-		pb.F_BytesRepeated[i] = []byte(s)
-		pb.F_StringRepeated[i] = s
-		pb.F_DoubleRepeated[i] = float64(i)
-		pb.F_FloatRepeated[i] = float32(i)
-		pb.F_Uint64Repeated[i] = uint64(i)
-		pb.F_Uint32Repeated[i] = uint32(i)
-		pb.F_Fixed64Repeated[i] = uint64(i)
-		pb.F_Fixed32Repeated[i] = uint32(i)
-		pb.F_Int64Repeated[i] = int64(i)
-		pb.F_Int32Repeated[i] = int32(i)
-		pb.F_BoolRepeated[i] = i%2 == 0
-		pb.RepeatedField[i] = igtf
-	}
-
-	// Marshal.
-	buf, _ := Marshal(pb)
-
-	// Now test Unmarshal by recreating the original buffer.
-	pbd := new(GoTest)
-	Unmarshal(buf, pbd)
-
-	// Check the checkable values.
-	for i := uint64(0); i < N; i++ {
-		if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
-			t.Error("pbd.Repeatedgroup bad")
-		}
-		var x uint64
-		x = uint64(pbd.F_Sint64Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Sint64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Sint32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Sint32Repeated bad", x, i)
-		}
-		s := fmt.Sprint(i)
-		equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
-		if pbd.F_StringRepeated[i] != s {
-			t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i)
-		}
-		x = uint64(pbd.F_DoubleRepeated[i])
-		if x != i {
-			t.Error("pbd.F_DoubleRepeated bad", x, i)
-		}
-		x = uint64(pbd.F_FloatRepeated[i])
-		if x != i {
-			t.Error("pbd.F_FloatRepeated bad", x, i)
-		}
-		x = pbd.F_Uint64Repeated[i]
-		if x != i {
-			t.Error("pbd.F_Uint64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Uint32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Uint32Repeated bad", x, i)
-		}
-		x = pbd.F_Fixed64Repeated[i]
-		if x != i {
-			t.Error("pbd.F_Fixed64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Fixed32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Fixed32Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Int64Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Int64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Int32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Int32Repeated bad", x, i)
-		}
-		if pbd.F_BoolRepeated[i] != (i%2 == 0) {
-			t.Error("pbd.F_BoolRepeated bad", pbd.F_BoolRepeated[i], i)
-		}
-		if pbd.RepeatedField[i] == nil { // TODO: more checking?
-			t.Error("pbd.RepeatedField bad")
-		}
-	}
-}
-
-// Verify we give a useful message when decoding to the wrong structure type.
-func TestTypeMismatch(t *testing.T) {
-	pb1 := initGoTest(true)
-
-	// Marshal
-	o := old()
-	o.Marshal(pb1)
-
-	// Now Unmarshal it to the wrong type.
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - _ = fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
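TestPackedNonPackedDecoderSwitching above depends on decoders accepting both wire forms of a repeated varint field. A dependency-free sketch of the two byte layouts for a hypothetical field 1 (illustrative values, not taken from the testdata messages):

package main

import "fmt"

func main() {
	vals := []byte{3, 1, 4} // small varints, one byte each

	// Non-packed: the field key (field 1, wire type 0) is repeated per element.
	var nonPacked []byte
	for _, v := range vals {
		nonPacked = append(nonPacked, 0x08, v) // 0x08 = 1<<3 | 0
	}

	// Packed: one key (field 1, wire type 2), a byte length, then the payload.
	packed := append([]byte{0x0a, byte(len(vals))}, vals...) // 0x0a = 1<<3 | 2

	fmt.Printf("non-packed: % x\n", nonPacked) // 08 03 08 01 08 04
	fmt.Printf("packed:     % x\n", packed)    // 0a 03 03 01 04
}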
-func TestRequiredFieldEnforcementGroups(t *testing.T) { - pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} - if _, err := Marshal(pb); err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { - t.Errorf("marshal: bad error type: %v", err) - } - - buf := []byte{11, 12} - if err := Unmarshal(buf, pb); err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - { - var m *GoEnum - if _, err := Marshal(m); err != ErrNil { - t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) - } - } - - { - m := &Communique{Union: &Communique_Msg{Msg: nil}} - if _, err := Marshal(m); err == nil || err == ErrNil { - t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) - } - } -} - -// A type that implements the Marshaler interface, but is not nillable. -type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err = Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - mbytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(mbytes, expected, t) { - o.DebugPrint("neq 1", mbytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(mbytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - mbytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(mbytes, expected, t) { - o.DebugPrint("neq 2", mbytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: {F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m1 := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.MsgMapping[1]; !ok { - t.Error("msg_mapping[1] not present") - } else if v != nil { - t.Errorf("msg_mapping[1] not nil: %v", v) - } -} - -func TestMapFieldWithNilBytes(t *testing.T) { - m1 := &MessageWithMap{ - ByteMapping: map[bool][]byte{ - false: {}, - true: nil, - }, - } - n := Size(m1) - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if n != len(b) { - 
t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.ByteMapping[false]; !ok { - t.Error("byte_mapping[false] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[false] not empty: %#v", v) - } - if v, ok := m2.ByteMapping[true]; !ok { - t.Error("byte_mapping[true] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[true] not empty: %#v", v) - } -} - -func TestDecodeMapFieldMissingKey(t *testing.T) { - b := []byte{ - 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes - // no key - 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing key: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) - } -} - -func TestDecodeMapFieldMissingValue(t *testing.T) { - b := []byte{ - 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes - 0x08, 0x01, // varint key, value 1 - // no value - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing value: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) - } -} - -func TestOneof(t *testing.T) { - m := &Communique{} - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal of empty message with oneof: %v", err) - } - if len(b) != 0 { - t.Errorf("Marshal of empty message yielded too many bytes: %v", b) - } - - m = &Communique{ - Union: &Communique_Name{Name: "Barry"}, - } - - // Round-trip. - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof: %v", err) - } - if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) - t.Errorf("Incorrect marshal of message with oneof: %v", b) - } - m.Reset() - if err = Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof: %v", err) - } - if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { - t.Errorf("After round trip, Union = %+v", m.Union) - } - if name := m.GetName(); name != "Barry" { - t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") - } - - // Let's try with a message in the oneof. - m.Union = &Communique_Msg{Msg: &Strings{StringField: String("deep deep string")}} - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof set to message: %v", err) - } - if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) - t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof set to message: %v", err) - } - ss, ok := m.Union.(*Communique_Msg) - if !ok || ss.Msg.GetStringField() != "deep deep string" { - t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) - } -} - -func TestInefficientPackedBool(t *testing.T) { - // https://github.com/golang/protobuf/issues/76 - inp := []byte{ - 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes - // Usually a bool should take a single byte, - // but it is permitted to be any varint. 
- 0xb9, 0x30, - } - if err := Unmarshal(inp, new(MoreRepeated)); err != nil { - t.Error(err) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. - pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/any_test.go b/vendor/github.com/gogo/protobuf/proto/any_test.go deleted file mode 100644 index f098d8287e..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/any_test.go +++ /dev/null @@ 
-1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "strings" - "testing" - - "github.com/gogo/protobuf/proto" - - pb "github.com/gogo/protobuf/proto/proto3_proto" - testpb "github.com/gogo/protobuf/proto/testdata" - "github.com/gogo/protobuf/types" -) - -var ( - expandedMarshaler = proto.TextMarshaler{ExpandAny: true} - expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} -) - -// anyEqual reports whether two messages which may be google.protobuf.Any or may -// contain google.protobuf.Any fields are equal. We can't use proto.Equal for -// comparison, because semantically equivalent messages may be marshaled to -// binary in different tag order. Instead, trust that TextMarshaler with -// ExpandAny option works and compare the text marshaling results. -func anyEqual(got, want proto.Message) bool { - // if messages are proto.Equal, no need to marshal. 
- if proto.Equal(got, want) { - return true - } - g := expandedMarshaler.Text(got) - w := expandedMarshaler.Text(want) - return g == w -} - -type golden struct { - m proto.Message - t, c string -} - -var goldenMessages = makeGolden() - -func makeGolden() []golden { - nested := &pb.Nested{Bunny: "Monty"} - nb, err := proto.Marshal(nested) - if err != nil { - panic(err) - } - m1 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m2 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m3 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, - } - m4 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, - } - m5 := &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} - - any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} - proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) - proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) - any1b, err := proto.Marshal(any1) - if err != nil { - panic(err) - } - any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} - proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) - any2b, err := proto.Marshal(any2) - if err != nil { - panic(err) - } - m6 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - ManyThings: []*types.Any{ - {TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, - {TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - }, - } - - const ( - m1Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m2Golden = ` -name: "David" -result_count: 47 -anything: < - ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m3Golden = ` -name: "David" -result_count: 47 -anything: < - ["type.googleapis.com/\"/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m4Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m5Golden = ` -[type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" -> -` - m6Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 42 - bikeshed: GREEN - rep_bytes: "roboto" - [testdata.Ext.more]: < - data: "baz" - > - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -` - ) - return []golden{ - {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, - {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, - {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " 
"}, - {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, - {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, - {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, - } -} - -func TestMarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { - t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) - } - if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { - t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) - } - } -} - -func TestUnmarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - want := tt.m - got := proto.Clone(tt.m) - got.Reset() - if err := proto.UnmarshalText(tt.t, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) - } - got.Reset() - if err := proto.UnmarshalText(tt.c, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) - } - } -} - -func TestMarshalUnknownAny(t *testing.T) { - m := &pb.Message{ - Anything: &types.Any{ - TypeUrl: "foo", - Value: []byte("bar"), - }, - } - want := `anything: < - type_url: "foo" - value: "bar" -> -` - got := expandedMarshaler.Text(m) - if got != want { - t.Errorf("got\n`%s`\nwant\n`%s`", got, want) - } -} - -func TestAmbiguousAny(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - type_url: "ttt/proto3_proto.Nested" - value: "\n\x05Monty" - `, pb) - t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) - if err != nil { - t.Errorf("failed to parse ambiguous Any message: %v", err) - } -} - -func TestUnmarshalOverwriteAny(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 7: Any message unpacked multiple times, or "type_url" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} - -func TestUnmarshalAnyMixAndMatch(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - value: "\n\x05Monty" - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 5: Any message unpacked multiple times, or "value" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go index 5d4cba4b51..a26b046d94 100644 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. 
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
 }

 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
 in := reflect.ValueOf(src)
 out := reflect.ValueOf(dst)
 if out.IsNil() {
 panic("proto: nil destination")
 }
 if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
 }
 if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
 return
 }
 mergeStruct(out.Elem(), in.Elem())
@@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) {
 bIn := emIn.GetExtensions()
 bOut := emOut.GetExtensions()
 *bOut = append(*bOut, *bIn...)
- } else if emIn, ok := extendable(in.Addr().Interface()); ok {
+ } else if emIn, err := extendable(in.Addr().Interface()); err == nil {
 emOut, _ := extendable(out.Addr().Interface())
 mIn, muIn := emIn.extensionsRead()
 if mIn != nil {
diff --git a/vendor/github.com/gogo/protobuf/proto/clone_test.go b/vendor/github.com/gogo/protobuf/proto/clone_test.go
deleted file mode 100644
index 1a16eb5549..0000000000
--- a/vendor/github.com/gogo/protobuf/proto/clone_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. 
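The rewritten Clone above now just allocates an empty message and delegates to Merge, and the new Merger/generatedMerger interfaces let generated messages bypass the reflection path. A minimal usage sketch of the documented semantics (an editorial illustration, not part of the vendored change), reusing the test-only types that the deleted tests below import: scalar fields set in src overwrite dst, repeated fields append, and Clone yields a deep copy.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/testdata"
)

func main() {
	dst := &pb.MyMessage{Name: proto.String("Dave"), Pet: []string{"bunny"}}
	src := &pb.MyMessage{Name: proto.String("Eve"), Pet: []string{"kitty"}}

	// Scalar fields set in src overwrite dst; repeated fields are appended.
	proto.Merge(dst, src)
	fmt.Println(dst.GetName(), dst.Pet) // Eve [bunny kitty]

	// Clone is just Merge into a freshly allocated message, hence a deep copy.
	c := proto.Clone(dst).(*pb.MyMessage)
	c.Pet[0] = "horsey"
	fmt.Println(dst.Pet[0]) // still "bunny"
}
```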
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - 0x4002: { - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: { - F: proto.Float64(3.0), - Exact: proto.Bool(true), - }, // the entire message should be overwritten - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - 0x4002: { - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. 
- { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, - // Oneof fields should merge by assignment. - { - src: &pb.Communique{ - Union: &pb.Communique_Number{Number: 41}, - }, - dst: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Number{Number: 41}, - }, - }, - // Oneof nil is the same as not set. - { - src: &pb.Communique{}, - dst: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - }, - { - src: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Cute: true}, // replace - "kay_b": {Bunny: "rabbit"}, // insert - }, - }, - dst: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Bunny: "lost"}, // replaced - "kay_c": {Bunny: "bunny"}, // keep - }, - }, - want: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Cute: true}, - "kay_b": {Bunny: "rabbit"}, - "kay_c": {Bunny: "bunny"}, - }, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 0000000000..24552483c6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
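For orientation before the decode.go hunk below: every decoder in this package bottoms out in varints, which store seven payload bits per byte, least-significant group first, with the high bit as a continuation flag. A round-trip sketch (illustrative, not part of the diff) using the package-level helpers this change keeps:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: low seven bits 0101100 with the continuation
	// bit set (0xAC), then the remaining 0000010 (0x02).
	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b) // ac 02

	x, n := proto.DecodeVarint(b)
	fmt.Println(x, n) // 300 2
}
```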
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 737f2731d4..d9aa3c42d6 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. 
+// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,541 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
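The reworked comments above pin down a subtle contract: Unmarshal resets the target before decoding, while UnmarshalMerge folds decoded fields into whatever is already there. A small sketch of the difference (illustrative, using the proto3_proto test type that appears elsewhere in this diff):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/proto3_proto"
)

func main() {
	raw, err := proto.Marshal(&pb.Message{Name: "Aaron"})
	if err != nil {
		panic(err)
	}

	m := &pb.Message{HeightInCm: 176}
	// Unmarshal resets m first: the pre-existing HeightInCm is lost.
	if err := proto.Unmarshal(raw, m); err != nil {
		panic(err)
	}
	fmt.Println(m.Name, m.HeightInCm) // Aaron 0

	m = &pb.Message{HeightInCm: 176}
	// UnmarshalMerge keeps existing data and merges the new fields in.
	if err := proto.UnmarshalMerge(raw, m); err != nil {
		panic(err)
	}
	fmt.Println(m.Name, m.HeightInCm) // Aaron 176
}
```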
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. 
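The deleted unmarshalType above tracks required fields with a single uint64 bitmap: tags 1 through 64 get one bit each, so re-encountering the same required field is not double-counted, while tags above 64 are only counted approximately, as its comments admit. A standalone sketch of that bookkeeping (all names are illustrative):

```go
package main

import "fmt"

func main() {
	required := 2   // pretend the message declares two required fields
	var seen uint64 // one bit per tag in [1, 64]

	markRequired := func(tag int) {
		if tag <= 64 {
			mask := uint64(1) << uint(tag-1)
			if seen&mask == 0 { // only the first occurrence counts
				seen |= mask
				required--
			}
		} else {
			required-- // imprecise for tags > 64, as the original notes
		}
	}

	markRequired(1)
	markRequired(1) // duplicate occurrence: ignored
	markRequired(3)
	fmt.Println(required == 0) // true: all required fields were seen
}
```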
- return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
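The pool constants above let the deleted decoders hand out *bool (and word) values without one heap allocation per field: pointers are carved out of a shared slab that is refilled every 16 values. A minimal standalone sketch of the pattern (illustrative):

```go
package main

import "fmt"

const boolPoolSize = 16

// boolPool mimics Buffer's internal bools slab: allocate a chunk, then
// hand out one element at a time until it is exhausted.
type boolPool struct{ bools []bool }

func (p *boolPool) alloc(v bool) *bool {
	if len(p.bools) == 0 {
		p.bools = make([]bool, boolPoolSize)
	}
	b := &p.bools[0]
	*b = v
	p.bools = p.bools[1:]
	return b
}

func main() {
	var p boolPool
	x, y := p.alloc(true), p.alloc(false)
	fmt.Println(*x, *y) // true false: two pointers, one allocation
}
```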
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
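The "restricted wire format" that dec_new_map parses is an ordinary nested message whose field 1 is the map key and field 2 is the value. A hedged sketch of hand-encoding one string-to-string entry with the Buffer primitives this diff keeps (the enclosing field number 4 is made up for the example):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	entry := proto.NewBuffer(nil)
	entry.EncodeVarint(1<<3 | proto.WireBytes) // field 1: map key
	entry.EncodeStringBytes("kay_a")
	entry.EncodeVarint(2<<3 | proto.WireBytes) // field 2: map value
	entry.EncodeStringBytes("bunny")

	// The entry itself travels as a length-delimited field of the
	// enclosing message, like any embedded message.
	msg := proto.NewBuffer(nil)
	msg.EncodeVarint(4<<3 | proto.WireBytes) // hypothetical map field, tag 4
	msg.EncodeRawBytes(entry.Bytes())
	fmt.Printf("% x\n", msg.Bytes())
}
```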
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. 
+ // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de4cc..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. 
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_test.go b/vendor/github.com/gogo/protobuf/proto/decode_test.go deleted file mode 100644 index 64d4decd9d..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - tpb "github.com/gogo/protobuf/proto/proto3_proto" -) - -var ( - bytesBlackhole []byte - msgBlackhole = new(tpb.Message) -) - -// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 -// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and -// 2 bytes long). -// func BenchmarkVarint32ArraySmall(b *testing.B) { -// for i := uint(1); i <= 10; i++ { -// dist := genInt32Dist([7]int{0, 3, 1}, 1<" } func init() { RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") } - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8b84d1b22d..3abfed2cff 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -37,28 +37,9 @@ package proto import ( "errors" - "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. 
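The deleted encoders above back gogo's custom time.Duration support: each value is converted to the seconds/nanos pair of the standard protobuf Duration message (via the durationProto/durationFromProto helpers elided earlier in this diff) and written as an embedded message. A rough sketch of just the conversion step, under that assumption:

```go
package main

import (
	"fmt"
	"time"
)

// toSecondsNanos mirrors what a durationProto-style helper has to compute:
// whole seconds plus the leftover nanoseconds.
func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
	secs = int64(d / time.Second)
	nanos = int32(d % time.Second)
	return secs, nanos
}

func main() {
	secs, nanos := toSecondsNanos(1500 * time.Millisecond)
	fmt.Println(secs, nanos) // 1 500000000
}
```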
@@ -82,10 +63,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +96,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. 
func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
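The "permit sign extension to use full 64-bit range" comments in the deleted encoders above are why a negative int32 is never cheap on the wire: it is widened to int64 before varint encoding, so its wire form always takes ten bytes. The SizeVarint ladder and the zigzag transform from the previous hunk quantify and avoid exactly that. A quick check (illustrative):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// zigzag64 reproduces what EncodeZigzag64 feeds to the varint writer:
// the sign bit moves to bit 0, so small negatives stay small
// (-1 -> 1, 1 -> 2, -2 -> 3, ...).
func zigzag64(x int64) uint64 {
	return uint64(x<<1) ^ uint64(x>>63)
}

func main() {
	// Boundary cases of the SizeVarint ladder: 7 payload bits per byte.
	fmt.Println(proto.SizeVarint(127), proto.SizeVarint(128)) // 1 2

	// Sign-extended -1 costs the maximum ten bytes...
	fmt.Println(proto.SizeVarint(uint64(int64(-1)))) // 10
	// ...while its zigzag form costs one.
	fmt.Println(proto.SizeVarint(zigzag64(-1))) // 1
}
```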
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
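The new three-line EncodeMessage body above makes the framing explicit: compute Size, write it as a varint, then write the marshaled bytes. A round-trip sketch with its counterpart DecodeMessage (illustrative, again using the proto3_proto test type):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/proto3_proto"
)

func main() {
	buf := proto.NewBuffer(nil)
	// Writes len(Marshal(m)) as a varint, then the message bytes.
	if err := buf.EncodeMessage(&pb.Message{Name: "David"}); err != nil {
		panic(err)
	}

	got := &pb.Message{}
	if err := proto.NewBuffer(buf.Bytes()).DecodeMessage(got); err != nil {
		panic(err)
	}
	fmt.Println(got.Name) // David
}
```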
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
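// The extension encoders below copy each extension's pre-encoded bytes
// straight into the output buffer: enc_map_body fast-paths the common
// zero- and one-extension cases, and otherwise sorts the field numbers so
// that the encoding is deterministic.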
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
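	// For instance, with a map[int32]string field the scratch values below
	// are: keycopy, an addressable int32; keyptr, an addressable *int32 set
	// to point at keycopy; and keybase, the address of keyptr (**int32)
	// viewed as a structPointer, which the scalar encoders can then treat
	// like an ordinary optional field.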
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
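// The helper below reserves four bytes for the length, encodes the body in
// place, and then shifts the body if the length varint turns out to need a
// different width. For example, a 3-byte body needs a 1-byte varint, so
// x = 1 - 4 = -3 and the body is moved three bytes left before the length
// is written into the reclaimed space.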
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 32111b7f41..0f5fb173e9 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -3,11 +3,6 @@ // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -18,9 +13,6 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,315 +28,6 @@ package proto -import ( - "reflect" -) - func NewRequiredNotSetError(field string) *RequiredNotSetError { return &RequiredNotSetError{field} } - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) 
- return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. -func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_test.go b/vendor/github.com/gogo/protobuf/proto/encode_test.go deleted file mode 100644 index 2176b894d1..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// +build go1.7 - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - tpb "github.com/gogo/protobuf/proto/proto3_proto" -) - -var ( - blackhole []byte -) - -// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 -// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the -// same. -// func BenchmarkAny(b *testing.B) { -// data := make([]byte, 1<<20) -// quantum := 1 << 10 -// for i := uint(0); i <= 10; i++ { -// b.Run(strconv.Itoa(quantum<> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { clearExtension(pb, extension.Field) } func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { + if epb, ok := pb.(extensionsBytes); ok { offset := 0 for offset != -1 { offset = deleteExtension(epb, fieldNum, offset) } return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -422,39 +333,33 @@ func clearExtension(pb Message, fieldNum int32) { delete(extmap, fieldNum) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { if epb, doki := pb.(extensionsBytes); doki { ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) + return decodeExtensionFromBytes(extension, *ext) } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { + + epb, err := extendable(pb) + if err != nil { return nil, err } + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + emap, mu := epb.extensionsRead() if emap == nil { return defaultExtensionValue(extension) @@ -479,6 +384,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -496,6 +406,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -530,31 +445,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -564,9 +476,13 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. 
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } extensions = make([]interface{}, len(es)) for i, e := range es { - extensions[i], err = GetExtension(pb, e) + extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } @@ -581,9 +497,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -610,23 +526,18 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { return err } - *ext = append(*ext, p.buf...) + bb := epb.GetExtensions() + *bb = append(*bb, newb...) return nil } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -656,8 +567,8 @@ func ClearAllExtensions(pb Message) { *ext = []byte{} return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index ea6478f009..53ebd8cca0 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -32,12 +32,36 @@ import ( "bytes" "errors" "fmt" + "io" "reflect" "sort" "strings" "sync" ) +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { if reflect.ValueOf(pb).IsNil() { return ifnotset @@ -56,19 +80,28 @@ func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool } func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } 
+ if err := that.Encode(); err != nil { + return false + } return bytes.Equal(this.enc, that.enc) } func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } return bytes.Compare(this.enc, that.enc) } func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) } type sortableMapElem struct { @@ -122,28 +155,26 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) } func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err - } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n } - return n, nil + return o, nil } func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { + e := m[id] + if err := e.Encode(); err != nil { return nil, err } - return m[id].enc, nil + return e.enc, nil } func size(buf []byte, wire int) (int, error) { @@ -218,35 +249,58 @@ func AppendExtension(e Message, tag int32, buf []byte) { } } -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. 
- x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } } - e.enc = p.buf return nil } func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) } return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) } @@ -292,3 +346,23 @@ func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { pb := extendable.(extendableProto) return pb.extensionsWrite() } + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_test.go b/vendor/github.com/gogo/protobuf/proto/extensions_test.go deleted file mode 100644 index 15c76a36db..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,538 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestExtensionDescsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{Count: proto.Int32(0)} - extdesc1 := pb.E_Ext_More - if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { - t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) - } - - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - extdesc2 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 123456789, - Name: "a.b", - Tag: "varint,123456789,opt", - } - ext2 := proto.Bool(false) - if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { - t.Fatalf("Could not set ext2: %s", err) - } - - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Could not marshal msg: %v", err) - } - if err = proto.Unmarshal(b, msg); err != nil { - t.Fatalf("Could not unmarshal into msg: %v", err) - } - - descs, err := proto.ExtensionDescs(msg) - if err != nil { - t.Fatalf("proto.ExtensionDescs: got error %v", err) - } - sortExtDescs(descs) - wantDescs := []*proto.ExtensionDesc{extdesc1, {Field: extdesc2.Field}} - if !reflect.DeepEqual(descs, wantDescs) { - t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) - } -} - -type ExtensionDescSlice []*proto.ExtensionDesc - -func (s ExtensionDescSlice) Len() int { return len(s) } -func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } -func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func sortExtDescs(s []*proto.ExtensionDesc) { - sort.Sort(ExtensionDescSlice(s)) -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 
uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). - } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. - // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tye.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. 
- valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value. - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} - -func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { - // Add a repeated extension to the result. 
- tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - // Marshal message with a repeated extension. - msg1 := new(pb.OtherMessage) - err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) - if err != nil { - t.Fatalf("[%s] Error setting extension: %v", test.name, err) - } - b, err := proto.Marshal(msg1) - if err != nil { - t.Fatalf("[%s] Error marshaling message: %v", test.name, err) - } - - // Unmarshal and read the merged proto. - msg2 := new(pb.OtherMessage) - err = proto.Unmarshal(b, msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_RComplex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.([]*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(ext, test.ext) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) - } - } -} - -func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { - // We may see multiple instances of the same extension in the wire - // format. For example, the proto compiler may encode custom options in - // this way. Here, we verify that we merge the extensions together. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - var buf bytes.Buffer - var want pb.ComplexExtension - - // Generate a serialized representation of a repeated extension - // by catenating bytes together. - for i, e := range test.ext { - // Merge to create the wanted proto. - proto.Merge(&want, e) - - // serialize the message - msg := new(pb.OtherMessage) - err := proto.SetExtension(msg, pb.E_Complex, e) - if err != nil { - t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) - } - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) - } - buf.Write(b) - } - - // Unmarshal and read the merged proto. 
- msg2 := new(pb.OtherMessage) - err := proto.Unmarshal(buf.Bytes(), msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_Complex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.(*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(*ext, want) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, want) - } - } -} - -func TestClearAllExtensions(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - m := &pb.MyMessage{} - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - if !proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) - } - proto.ClearAllExtensions(m) - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } -} - -func TestMarshalRace(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - - m := &pb.MyMessage{Count: proto.Int32(4)} - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - - errChan := make(chan error, 3) - for n := 3; n > 0; n-- { - go func() { - _, err := proto.Marshal(m) - errChan <- err - }() - } - for i := 0; i < 3; i++ { - err := <-errChan - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index c98d73da49..b2271d0b7b 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -273,6 +273,67 @@ import ( "sync" ) +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. 
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error was nil or non-fatal;
+// it returns false for any fatal error.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
 // Message is implemented by generated protocol buffer messages.
 type Message interface {
 	Reset()
@@ -309,16 +370,7 @@ type Buffer struct {
 	buf   []byte // encode/decode byte stream
 	index int    // read point
 
-	// pools of basic types to amortize allocation.
-	bools   []bool
-	uint32s []uint32
-	uint64s []uint64
-
-	// extra pools, only used with pointer_reflect.go
-	int32s   []int32
-	int64s   []int64
-	float32s []float32
-	float64s []float64
+	deterministic bool
 }
 
 // NewBuffer allocates a new Buffer and initializes its internal data to
@@ -343,6 +395,30 @@ func (p *Buffer) SetBuf(s []byte) {
 // Bytes returns the contents of the Buffer.
 func (p *Buffer) Bytes() []byte { return p.buf }
 
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
 /*
  * Helper routines for simplifying the creation of optional fields of basic type.
  */
@@ -552,9 +628,11 @@ func SetDefaults(pb Message) {
 	setDefaults(reflect.ValueOf(pb), true, false)
 }
 
-// v is a pointer to a struct.
+// v is a struct.
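The RequiredNotSet and InvalidUTF8 marker methods let callers classify errors without naming concrete types. A minimal caller-side sketch; the helper name marshalLenient and its policy are illustrative only, and it leans on the nonFatal bookkeeping above, where encoding continues past a missing required field and the first such error is surfaced at the end alongside the partial output:

import "github.com/gogo/protobuf/proto"

// marshalLenient treats a missing required field as non-fatal: the
// returned bytes still contain every field that was set.
func marshalLenient(m proto.Message) ([]byte, error) {
	b, err := proto.Marshal(m)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		return b, nil // non-fatal; partial output is usable
	}
	return b, err
}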
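Deterministic output is opt-in per Buffer rather than a global switch. A short usage sketch (deterministicBytes is an illustrative name, not part of this change):

import "github.com/gogo/protobuf/proto"

// deterministicBytes marshals m with map entries emitted in sorted key
// order, so equal messages yield identical bytes within one binary.
func deterministicBytes(m proto.Message) ([]byte, error) {
	var buf proto.Buffer // the zero value is ready to use
	buf.SetDeterministic(true)
	if err := buf.Marshal(m); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}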
func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() + if v.Kind() == reflect.Ptr { + v = v.Elem() + } defaultMu.RLock() dm, ok := defaults[v.Type()] @@ -656,8 +734,11 @@ func setDefaults(v reflect.Value, recur, zeros bool) { for _, ni := range dm.nested { f := v.Field(ni) - // f is *T or []*T or map[T]*T + // f is *T or T or []*T or []T switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + case reflect.Ptr: if f.IsNil() { continue @@ -667,7 +748,7 @@ func setDefaults(v reflect.Value, recur, zeros bool) { case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) - if e.IsNil() { + if e.Kind() == reflect.Ptr && e.IsNil() { continue } setDefaults(e, recur, zeros) @@ -739,6 +820,9 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true @@ -748,7 +832,7 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes case reflect.Slice: switch ft.Elem().Kind() { - case reflect.Ptr: + case reflect.Ptr, reflect.Struct: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field @@ -831,22 +915,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +929,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +975,13 @@ const GoGoProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. 
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
index 4b4f7c909e..b3aa39190a 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -33,6 +33,14 @@ import (
 	"strconv"
 )
 
+type Sizer interface {
+	Size() int
+}
+
+type ProtoSizer interface {
+	ProtoSize() int
+}
+
 func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
 	s, ok := m[value]
 	if !ok {
diff --git a/vendor/github.com/gogo/protobuf/proto/map_test.go b/vendor/github.com/gogo/protobuf/proto/map_test.go
deleted file mode 100644
index 18b946d00e..0000000000
--- a/vendor/github.com/gogo/protobuf/proto/map_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package proto_test
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/gogo/protobuf/proto"
-	ppb "github.com/gogo/protobuf/proto/proto3_proto"
-)
-
-func marshalled() []byte {
-	m := &ppb.IntMaps{}
-	for i := 0; i < 1000; i++ {
-		m.Maps = append(m.Maps, &ppb.IntMap{
-			Rtt: map[int32]int32{1: 2},
-		})
-	}
-	b, err := proto.Marshal(m)
-	if err != nil {
-		panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
-	}
-	return b
-}
-
-func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
-	in := marshalled()
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			var out ppb.IntMaps
-			if err := proto.Unmarshal(in, &out); err != nil {
-				b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
-			}
-		}
-	})
-}
-
-func BenchmarkSequentialMapUnmarshal(b *testing.B) {
-	in := marshalled()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		var out ppb.IntMaps
-		if err := proto.Unmarshal(in, &out); err != nil {
-			b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
-		}
-	}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
index fd982decd6..3b6ca41d5e 100644
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -42,6 +42,7 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
+	"sync"
 )
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
 }
 
 func (ms *messageSet) Has(pb Message) bool {
-	if ms.find(pb) != nil {
-		return true
-	}
-	return false
+	return ms.find(pb) != nil
 }
 
 func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
 func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
+	return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/gogo/protobuf/proto/message_set_test.go b/vendor/github.com/gogo/protobuf/proto/message_set_test.go deleted file mode 100644 index 353a3ea769..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &messageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - var extensions XXX_InternalExtensions - if err := UnmarshalMessageSet(b, &extensions); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := extensions.p.extensionMap[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go index fb512e2e16..b6cad90834 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. 
-// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. 
-func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. 
+// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
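The grow helper above is the purego replacement for direct slice manipulation: it lengthens an addressable slice through reflection, reusing spare capacity when it can. A self-contained sketch of the same technique:

package main

import (
	"fmt"
	"reflect"
)

// grow extends the addressable slice s by one element and returns the
// new (settable) element, growing the backing array only when needed.
func grow(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1)
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
	}
	return s.Index(n)
}

func main() {
	xs := []int32{1, 2}
	v := reflect.ValueOf(&xs).Elem() // addressable view of xs
	grow(v).SetInt(3)
	fmt.Println(xs) // [1 2 3]
}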
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go index 1763a5f227..7ffd3c29d9 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
package proto @@ -34,52 +38,22 @@ import ( "reflect" ) -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") -} - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} +// TODO: untested, so probably incorrect. -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} } -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go index 6b5567d47c..d55a335d94 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index f156a29f0e..aca8eed02a 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
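The atomicLoadUnmarshalInfo/atomicStoreUnmarshalInfo family added at the end of this hunk all wrap the same publish-once pattern: a typed pointer is published with atomic.StorePointer and read back with atomic.LoadPointer, so cached per-message metadata can be consulted on the hot path without taking a mutex. Below is a minimal, self-contained sketch of that pattern using only the standard library; the cachedInfo type and the function names are hypothetical, not part of this diff.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// cachedInfo is a hypothetical stand-in for marshalInfo, unmarshalInfo, etc.
type cachedInfo struct{ n int }

var cache *cachedInfo // written once, read many times

// storeCache publishes v; mirrors the atomicStore* helpers above.
func storeCache(v *cachedInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&cache)), unsafe.Pointer(v))
}

// loadCache observes the published pointer (or nil); mirrors atomicLoad*.
func loadCache() *cachedInfo {
	return (*cachedInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cache))))
}

func main() {
	if loadCache() == nil {
		storeCache(&cachedInfo{n: 42}) // first use: compute and publish
	}
	fmt.Println(loadCache().n) // later uses: lock-free read
}
```

The cast through unsafe.Pointer is what lets one pair of generic atomic primitives serve several distinct info types without duplicating the synchronization logic.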
@@ -37,92 +37,20 @@ import ( "unsafe" ) -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} } -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). 
-type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 2a69e8862d..04dcb8d9ef 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -63,42 +63,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -145,13 +109,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. 
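The tagMap comment retained in the hunk above describes an optimization over map[int]int that exploits the fact that encoded protocol buffers usually carry small tag numbers in tag order: small tags can index a dense array, with a map fallback for rare large tags. A rough, self-contained sketch of that idea follows; the field names, the 1024 cutoff, and the +1 sentinel encoding are illustrative assumptions, not the actual tagMap layout.

```go
package main

import "fmt"

const fastTags = 1024 // assumed cutoff for the dense fast path

type tagIndex struct {
	fast [fastTags]int // tag -> value+1 for small tags; 0 means "absent"
	slow map[int]int   // overflow for large tags
}

// put stores v for tag; values are assumed non-negative (e.g. field indexes),
// so storing v+1 lets the zero value of the array mean "not set".
func (t *tagIndex) put(tag, v int) {
	if tag > 0 && tag < fastTags {
		t.fast[tag] = v + 1
		return
	}
	if t.slow == nil {
		t.slow = make(map[int]int)
	}
	t.slow[tag] = v
}

// get retrieves the value for tag, reporting whether it was present.
func (t *tagIndex) get(tag int) (int, bool) {
	if tag > 0 && tag < fastTags {
		if v := t.fast[tag]; v != 0 {
			return v - 1, true
		}
		return 0, false
	}
	v, ok := t.slow[tag]
	return v, ok
}

func main() {
	var idx tagIndex
	idx.put(3, 7)       // common case: small tag, array slot
	idx.put(100000, 9)  // rare case: large tag, map fallback
	fmt.Println(idx.get(3))      // 7 true
	fmt.Println(idx.get(100000)) // 9 true
}
```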
@@ -187,7 +144,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value @@ -196,37 +153,21 @@ type Properties struct { CastType string StdTime bool StdDuration bool + WktPointer bool - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -272,29 +213,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -309,6 +235,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -336,7 +263,7 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } case strings.HasPrefix(f, "embedded="): p.OrigName = strings.Split(f, "=")[1] @@ -348,301 +275,58 @@ func (p *Properties) Parse(s string) { p.StdTime = true case f == "stdduration": p.StdDuration = true + case f == "wktptr": + p.WktPointer = true } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { isMap := typ.Kind() == reflect.Map if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) + p.ctype = typ p.setTag(lockGetProp) return } if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) p.setTag(lockGetProp) return } if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { p.setTag(lockGetProp) return } switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } case reflect.Struct: p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = 
(*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } - case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire 
== "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte + p.stype = t3 } case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) + p.stype = t2 } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), @@ -650,29 +334,16 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock vtype = reflect.PtrTo(vtype) } - p.mvalprop.CustomType = p.CustomType - p.mvalprop.StdDuration = p.StdDuration - p.mvalprop.StdTime = p.StdTime - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) } func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -683,20 +354,9 @@ func (p *Properties) setTag(lockGetProp bool) { } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -706,14 +366,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -763,10 +420,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -777,23 +430,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { isOneofMessage = true @@ -809,9 +445,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -822,8 +455,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -873,30 +505,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -925,20 +533,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). 
var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -954,7 +584,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index b6b7176c56..40ea3dd935 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,83 +29,8 @@ package proto import ( - "fmt" - "os" "reflect" ) -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/proto3_test.go b/vendor/github.com/gogo/protobuf/proto/proto3_test.go deleted file mode 100644 index 75b66c1797..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/proto3_proto" - tpb "github.com/gogo/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestGettersForBasicTypesExist(t *testing.T) { - var m pb.Message - if got := m.GetNested().GetBunny(); got != "" { - t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) - } - if got := m.GetNested().GetCute(); got { - t.Errorf("m.GetNested().GetCute() = %t, want false", got) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. 
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": {N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/size2_test.go b/vendor/github.com/gogo/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a1..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/size_test.go b/vendor/github.com/gogo/protobuf/proto/size_test.go deleted file mode 100644 index 0a6c1772b4..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/size_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: {}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: {F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: {}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, - - {"oneof not set", &pb.Oneof{}}, - {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{F_Bool: true}}}, - {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 0}}}, - {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 1 << 20}}}, - {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{F_Int64: 42}}}, - {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{F_Fixed32: 43}}}, - {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{F_Fixed64: 44}}}, - {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{F_Uint32: 45}}}, - {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{F_Uint64: 46}}}, - {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{F_Float: 47.1}}}, - {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{F_Double: 48.9}}}, - {"oneof string", &pb.Oneof{Union: 
&pb.Oneof_F_String{F_String: "Rhythmic Fman"}}}, - {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{F_Bytes: []byte("let go")}}}, - {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{F_Sint32: 50}}}, - {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{F_Sint64: 51}}}, - {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{F_Enum: pb.MyMessage_BLUE}}}, - {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, - {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{F_Message: &pb.GoTestField{Label: String("k"), Type: String("v")}}}}, - {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{FGroup: &pb.Oneof_F_Group{X: Int32(52)}}}}, - {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{F_Largest_Tag: 1}}}, - {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 1}, Tormato: &pb.Oneof_Value{Value: 2}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 0000000000..ba58c49a43 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. 
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from the type of the message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice, appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
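+ // Reading that example left to right: "bytes" is the wire encoding, "49" is
+ // the field number, "opt" marks the field optional ("req" sets fi.required
+ // below), and the remaining comma-separated entries carry options such as
+ // the name and the default value.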
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
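+// As a sketch of the mapping performed here: a generated tag such as
+// "varint,1,opt,name=id" on an *int32 field selects the pointer variants
+// (sizeVarintS32Ptr, appendVarintS32Ptr), while the same encoding on an
+// []int32 field tagged "packed" selects the packed-slice variants.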
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
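+ // (sizeBytesOneof and appendBytesOneof never suppress the field, whereas
+ // the proto3 variants below skip empty slices; that is why the order of
+ // these two checks matters.)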
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
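+// Worked example (a sketch, not upstream commentary): for a packed []bool of
+// length 3 with tagsize 1, sizeBoolPackedSlice below returns
+// 3 + SizeVarint(3) + 1 = 5 bytes: one key byte, one length byte, and one
+// byte per element.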
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
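+ // Worked example: v = 300 falls into the v < 1<<14 case below and appends
+ // the two bytes 0xAC, 0x02 (the low 7 bits with the continuation bit set,
+ // then the remaining bits).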
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
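+// Each element is framed like a singular message field: key, varint length
+// taken from the element's size cache, then the element bytes; a nil element
+// yields errRepeatedHasNil rather than being skipped.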
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
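+ // (The cache matters because a value that is itself a message containing
+ // maps would otherwise be re-sized recursively on every marshal; see the
+ // quadratic-cost note above.)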
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
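+// Entries still in their encoded form (e.enc) are copied through verbatim;
+// decoded values are re-marshaled via their cached extension element info.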
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
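+// For reference, the framing bytes used for each Item group here are:
+// 0x0b = 1<<3|WireStartGroup, 0x10 = 2<<3|WireVarint (type_id),
+// 0x1a = 3<<3|WireBytes (message), 0x0c = 1<<3|WireEndGroup.
+// A minimal sketch, assuming type_id 100 and a 3-byte encoded extension:
+//
+//	b = append(b, 0x0b)      // start Item group
+//	b = append(b, 0x10, 100) // type_id = 100
+//	b = append(b, 0x1a, 3)   // message, length 3
+//	b = append(b, enc...)    // the 3 encoded bytes
+//	b = append(b, 0x0c)      // end Item group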
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
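+// For reference, generated code pairs these two methods as a two-pass
+// encode: XXX_Size walks the message once (caching submessage sizes), then
+// XXX_Marshal appends into a buffer preallocated to exactly that size, as
+// Marshal below does:
+//
+//	siz := m.XXX_Size()
+//	b := make([]byte, 0, siz)
+//	b, err := m.XXX_Marshal(b, false)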
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 0000000000..997f57c1e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
+// It marshals a message T instead of a *T
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			siz := u.size(ptr)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
+// It marshals a slice of messages []T instead of []*T
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
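+// A caller-side sketch of this contract, assuming a generated message type
+// with a required field left unset: Marshal still returns the complete
+// encoding of everything that was set, alongside the error.
+//
+//	data, err := proto.Marshal(msg)
+//	if _, ok := err.(*proto.RequiredNotSetError); ok {
+//		// data is still a usable, fully-written encoding
+//	}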
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
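+// For reference: durationProto converts the Go time.Duration into the
+// protobuf Duration message (seconds + nanos), which is then framed
+// length-delimited like any embedded message. A worked example, assuming a
+// 90s value:
+//
+//	d := 90 * time.Second
+//	dur := durationProto(d) // seconds=90, nanos=0
+//	buf, _ := Marshal(dur)  // appended after the wiretag and length above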
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..f520106e09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	// * Pointer to struct
+	// * Pointer to basic type (proto2 only)
+	// * Slice (first value in slice header is a pointer)
+	// * String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 0: invalid
+	// 1: bool
+	// 4: int32, uint32, float32
+	// 8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
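+// A caller-level sketch of the semantics implemented in this file, assuming
+// hypothetical generated types Example and Inner:
+//
+//	dst := &Example{A: 1, List: []int64{1}}
+//	src := &Example{List: []int64{2}, Sub: &Inner{X: 3}}
+//	proto.Merge(dst, src)
+//	// dst.A == 1                 (a zero-valued src scalar leaves dst untouched)
+//	// dst.List == []int64{1, 2}  (slices are appended)
+//	// dst.Sub deep-copies src.Sub (submessages are merged into fresh values)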
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..e6b15c76ca --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte slice
+	extensions      field                         // offset of extensions field (struct), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (map[int32]Extension)
+	bytesExtensions field                         // offset of XXX_extensions with the field type []byte
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information for unmarshaling
+// a single field of a message.
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to the protocol buffer message.
+// b is a byte stream to unmarshal into the message.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
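+// For reference: the key re-encoded below is the same varint the loop
+// parsed above, key = tag<<3 | wiretype. A worked example, assuming the
+// single key byte 0x10:
+//
+//	key := uint64(0x10)
+//	tag, wire := key>>3, int(key)&7 // tag == 2, wire == 0 (WireVarint)
+//	// an unknown field 2 is preserved verbatim: 0x10 followed by its payload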
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
+ for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
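+// Each unmarshaler below follows one contract: check the wire type, decode
+// the value, store it through the field pointer, and return the remaining
+// bytes. For the sint decoders further down, int64(x>>1) ^ int64(x)<<63>>63
+// undoes zigzag encoding; a few worked values of encoded x -> decoded v:
+//
+//	x = 0 -> 0, x = 1 -> -1, x = 2 -> 1, x = 3 -> -2, x = 4 -> 2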
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 0000000000..00d6c7ad93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. 
+ // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil,
errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice :=
f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index f609d1d453..0407ba85d0 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -57,7 +57,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -177,11 +176,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -276,6 +270,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -366,7 +364,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -383,7 +381,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -447,12 +445,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } if len(props.Enum) > 0 { if err := tm.writeEnum(w, fv, props); err != nil { @@ -475,7 +467,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { pv = reflect.New(sv.Type()) pv.Elem().Set(sv) } - if pv.Type().Implements(extensionRangeType) { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -484,27 +476,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -605,6 +576,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -613,8 +597,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index f1276729a3..1ce0be2fa9 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -212,7 +212,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -283,60 +282,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s 
contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -650,17 +636,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { @@ -734,6 +720,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -934,6 +923,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.SetFloat(f) return nil } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) @@ -981,9 +980,19 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -1001,13 +1010,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. 
func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go b/vendor/github.com/gogo/protobuf/proto/text_parser_test.go deleted file mode 100644 index 9a3a447ce7..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,673 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - . 
"github.com/gogo/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation with double quotes - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenation with single quotes - { - in: "count:42 name: 'My name is '\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenations with mixed quotes - { - in: "count:42 name: 'My name is '\n\"elsewhere\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - { - in: "count:42 name: \"My name is \"\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated field with list notation - { - in: `count:42 pet: ["horsey", "bunny"]`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Missing required field in a required submessage - { - in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, - err: `proto: required field "testdata.InnerMessage.host" not set`, - out: &MyMessage{ - Count: Int32(42), - WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: 
"Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - Name: String("Ezekiel"), - }, - }, - - // Boolean false - { - in: `count:42 inner { host: "example.com" connected: false }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean true - { - in: `count:42 inner { host: "example.com" connected: true }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean 0 - { - in: `count:42 inner { host: "example.com" connected: 0 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean 1 - { - in: `count:42 inner { host: "example.com" connected: 1 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean f - { - in: `count:42 inner { host: "example.com" connected: f }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean t - { - in: `count:42 inner { host: "example.com" connected: t }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean False - { - in: `count:42 inner { host: "example.com" connected: False }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean True - { - in: `count:42 inner { host: "example.com" connected: True }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) {
-	pb := new(RepeatedEnum)
-	if err := UnmarshalText("color: RED", pb); err != nil {
-		t.Fatal(err)
-	}
-	exp := &RepeatedEnum{
-		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
-	}
-	if !Equal(pb, exp) {
-		t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
-	}
-}
-
-func TestProto3TextParsing(t *testing.T) {
-	m := new(proto3pb.Message)
-	const in = `name: "Wallace" true_scotsman: true`
-	want := &proto3pb.Message{
-		Name:         "Wallace",
-		TrueScotsman: true,
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-func TestMapParsing(t *testing.T) {
-	m := new(MessageWithMap)
-	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
-		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
-		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
-		`msg_mapping:<value:<f: 5.0>>` + // omitted key
-		`msg_mapping:<key:1>` + // omitted value
-		`byte_mapping:<key:true value:"so be it">` +
-		`byte_mapping:<>` // omitted key and value
-	want := &MessageWithMap{
-		NameMapping: map[int32]string{
-			1:    "Beatles",
-			1234: "Feist",
-		},
-		MsgMapping: map[int64]*FloatingPoint{
-			-4: {F: Float64(2.0)},
-			-2: {F: Float64(4.0)},
-			0:  {F: Float64(5.0)},
-			1:  nil,
-		},
-		ByteMapping: map[bool][]byte{
-			false: nil,
-			true:  []byte("so be it"),
-		},
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-func TestOneofParsing(t *testing.T) {
-	const in = `name:"Shrek"`
-	m := new(Communique)
-	want := &Communique{Union: &Communique_Name{Name: "Shrek"}}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-
-	const inOverwrite = `name:"Shrek" number:42`
-	m = new(Communique)
-	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
-	if err := UnmarshalText(inOverwrite, m); err == nil {
-		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
-	} else if err.Error() != testErr {
-		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
-			err.Error(), testErr)
-	}
-
-}
-
-var benchInput string
-
-func init() {
-	benchInput = "count: 4\n"
-	for i := 0; i < 1000; i++ {
-		benchInput += "pet: \"fido\"\n"
-	}
-
-	// Check it is valid input.
-	pb := new(MyMessage)
-	err := UnmarshalText(benchInput, pb)
-	if err != nil {
-		panic("Bad benchmark input: " + err.Error())
-	}
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
-	pb := new(MyMessage)
-	for i := 0; i < b.N; i++ {
-		UnmarshalText(benchInput, pb)
-	}
-	b.SetBytes(int64(len(benchInput)))
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_test.go b/vendor/github.com/gogo/protobuf/proto/text_test.go
deleted file mode 100644
index 27df6cb9b4..0000000000
--- a/vendor/github.com/gogo/protobuf/proto/text_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
-	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
-	proto.SetRawExtension(msg, 202, b)
-
-	return msg
-}
-
-const text = `count: 42
-name: "Dave"
-quote: "\"I didn't want to go.\""
-pet: "bunny"
-pet: "kitty"
-pet: "horsey"
-inner: <
-  host: "footrest.syd"
-  port: 7001
-  connected: true
->
-others: <
-  key: 3735928559
-  value: "\001A\007\014"
->
-others: <
-  weight: 6.022
-  inner: <
-    host: "lesha.mtv"
-    port: 8002
-  >
->
-bikeshed: BLUE
-SomeGroup {
-  group_field: 8
-}
-/* 2 unknown bytes */
-13: 4
-[testdata.Ext.more]: <
-  data: "Big gobs for big rats"
->
-[testdata.greeting]: "adg"
-[testdata.greeting]: "easy"
-[testdata.greeting]: "cow"
-/* 13 unknown bytes */
-201: "\t3G skiing"
-/* 3 unknown bytes */
-202: 19
-`
-
-func TestMarshalText(t *testing.T) {
-	buf := new(bytes.Buffer)
-	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
-		t.Fatalf("proto.MarshalText: %v", err)
-	}
-	s := buf.String()
-	if s != text {
-		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
-	}
-}
-
-func TestMarshalTextCustomMessage(t *testing.T) {
-	buf := new(bytes.Buffer)
-	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
-		t.Fatalf("proto.MarshalText: %v", err)
-	}
-	s := buf.String()
-	if s != "custom" {
-		t.Errorf("Got %q, expected %q", s, "custom")
-	}
-}
-func TestMarshalTextNil(t *testing.T) {
-	want := ""
-	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
-	for i, test := range tests {
-		buf := new(bytes.Buffer)
-		if err := proto.MarshalText(buf, test); err != nil {
-			t.Fatal(err)
-		}
-		if got := buf.String(); got != want {
-			t.Errorf("%d: got %q want %q", i, got, want)
-		}
-	}
-}
-
-func TestMarshalTextUnknownEnum(t *testing.T) {
-	// The Color enum only specifies values 0-2.
-	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
-	got := m.String()
-	const want = `bikeshed:3 `
-	if got != want {
-		t.Errorf("\n got %q\nwant %q", got, want)
-	}
-}
-
-func TestTextOneof(t *testing.T) {
-	tests := []struct {
-		m    proto.Message
-		want string
-	}{
-		// zero message
-		{&pb.Communique{}, ``},
-		// scalar field
-		{&pb.Communique{Union: &pb.Communique_Number{Number: 4}}, `number:4`},
-		// message field
-		{&pb.Communique{Union: &pb.Communique_Msg{
-			Msg: &pb.Strings{StringField: proto.String("why hello!")},
-		}}, `msg:<string_field:"why hello!" >`},
-		// bad oneof (should not panic)
-		{&pb.Communique{Union: &pb.Communique_Msg{Msg: nil}}, `msg:/* nil */`},
-	}
-	for _, test := range tests {
-		got := strings.TrimSpace(test.m.String())
-		if got != test.want {
-			t.Errorf("\n got %s\nwant %s", got, test.want)
-		}
-	}
-}
-
-func BenchmarkMarshalTextBuffered(b *testing.B) {
-	buf := new(bytes.Buffer)
-	m := newTestMessage()
-	for i := 0; i < b.N; i++ {
-		buf.Reset()
-		proto.MarshalText(buf, m)
-	}
-}
-
-func BenchmarkMarshalTextUnbuffered(b *testing.B) {
-	w := ioutil.Discard
-	m := newTestMessage()
-	for i := 0; i < b.N; i++ {
-		proto.MarshalText(w, m)
-	}
-}
-
-func compact(src string) string {
-	// s/[ \n]+/ /g; s/ $//;
-	dst := make([]byte, len(src))
-	space, comment := false, false
-	j := 0
-	for i := 0; i < len(src); i++ {
-		if strings.HasPrefix(src[i:], "/*") {
-			comment = true
-			i++
-			continue
-		}
-		if comment && strings.HasPrefix(src[i:], "*/") {
-			comment = false
-			i++
-			continue
-		}
-		if comment {
-			continue
-		}
-		c := src[i]
-		if c == ' ' || c == '\n' {
-			space = true
-			continue
-		}
-		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
-			space = false
-		}
-		if c == '{' {
-			space = false
-		}
-		if space {
-			dst[j] = ' '
-			j++
-			space = false
-		}
-		dst[j] = c
- j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. - &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pbStrings := new(pb.Strings) - if err := proto.UnmarshalText(s, pbStrings); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pbStrings, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pbStrings) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
-		if err != outOfSpace {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
-		}
-		s := buf.b.String()
-		x := text[:buf.limit]
-		if s != x {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
-		}
-	}
-}
-
-func TestFloats(t *testing.T) {
-	tests := []struct {
-		f    float64
-		want string
-	}{
-		{0, "0"},
-		{4.7, "4.7"},
-		{math.Inf(1), "inf"},
-		{math.Inf(-1), "-inf"},
-		{math.NaN(), "nan"},
-	}
-	for _, test := range tests {
-		msg := &pb.FloatingPoint{F: &test.f}
-		got := strings.TrimSpace(msg.String())
-		want := `f:` + test.want
-		if got != want {
-			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
-		}
-	}
-}
-
-func TestRepeatedNilText(t *testing.T) {
-	m := &pb.MessageList{
-		Message: []*pb.MessageList_Message{
-			nil,
-			{
-				Name: proto.String("Horse"),
-			},
-			nil,
-		},
-	}
-	want := `Message <nil>
-Message {
-  name: "Horse"
-}
-Message <nil>
-`
-	if s := proto.MarshalTextString(m); s != want {
-		t.Errorf(" got: %s\nwant: %s", s, want)
-	}
-}
-
-func TestProto3Text(t *testing.T) {
-	tests := []struct {
-		m    proto.Message
-		want string
-	}{
-		// zero message
-		{&proto3pb.Message{}, ``},
-		// zero message except for an empty byte slice
-		{&proto3pb.Message{Data: []byte{}}, ``},
-		// trivial case
-		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
-		// empty map
-		{&pb.MessageWithMap{}, ``},
-		// non-empty map; map format is the same as a repeated struct,
-		// and they are sorted by key (numerically for numeric keys).
-		{
-			&pb.MessageWithMap{NameMapping: map[int32]string{
-				-1:      "Negatory",
-				7:       "Lucky",
-				1234:    "Feist",
-				6345789: "Otis",
-			}},
-			`name_mapping:<key:-1 value:"Negatory" > ` +
-				`name_mapping:<key:7 value:"Lucky" > ` +
-				`name_mapping:<key:1234 value:"Feist" > ` +
-				`name_mapping:<key:6345789 value:"Otis" >`,
-		},
-		// map with nil value; not well-defined, but we shouldn't crash
-		{
-			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
-			`msg_mapping:<key:7 >`,
-		},
-	}
-	for _, test := range tests {
-		got := strings.TrimSpace(test.m.String())
-		if got != test.want {
-			t.Errorf("\n got %s\nwant %s", got, test.want)
-		}
-	}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
index d427647436..38439fa990 100644
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -47,183 +47,3 @@ func (*timestamp) String() string { return "timestamp" }
 func init() {
 	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
 }
-
-func (o *Buffer) decTimestamp() (time.Time, error) {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return time.Time{}, err
-	}
-	tproto := &timestamp{}
-	if err := Unmarshal(b, tproto); err != nil {
-		return time.Time{}, err
-	}
-	return timestampFromProto(tproto)
-}
-
-func (o *Buffer) dec_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	setPtrCustomType(base, p.field, &t)
-	return nil
-}
-
-func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	setCustomType(base, p.field, &t)
-	return nil
-}
-
-func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
-	var zero field
-	setPtrCustomType(newBas, zero, &t)
-	return nil
-}
-
-func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
-
t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) - var zero field - setCustomType(newBas, zero, &t) - return nil -} - -func size_time(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_time(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 0000000000..b175d1b642 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, 
errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 0000000000..c1cf7bf85e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), 
"gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile deleted file mode 100644 index a42cc3717f..0000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -test: - cd testdata && make test diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 82623f0497..44f893b777 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -1,35 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ package descriptor import proto "github.com/gogo/protobuf/proto" @@ -139,7 +110,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -179,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -220,7 +191,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { return nil } func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{10, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10, 0} } type FieldOptions_CType int32 @@ -260,7 +231,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 0} } type FieldOptions_JSType int32 @@ -302,7 +273,7 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, @@ -344,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -390,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -496,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -576,18 +610,37 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{2, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 0} } +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) 
XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -614,17 +667,36 @@ func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{2, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 1} } +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -643,14 +715,18 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{3} +} var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -659,6 +735,23 @@ var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -697,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -779,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -805,16 +940,44 @@ func (m *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -837,21 +1000,106 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a value within an enum. 
type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{7} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) } +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { return *m.Name @@ -875,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -918,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func 
(dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -992,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -1020,7 +1310,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1046,16 +1336,21 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1064,6 +1359,23 @@ var extRange_FileOptions = []proto.ExtensionRange{ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1096,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1261,14 +1574,18 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1277,6 +1594,23 @@ var extRange_MessageOptions = []proto.ExtensionRange{ func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1379,14 +1713,18 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1395,6 +1733,23 @@ var extRange_FieldOptions = []proto.ExtensionRange{ func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1454,14 +1809,18 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1470,6 +1829,23 @@ var extRange_OneofOptions = []proto.ExtensionRange{ func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1489,14 +1865,18 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1505,6 +1885,23 @@ var extRange_EnumOptions = []proto.ExtensionRange{ func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1537,14 +1934,18 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1553,6 +1954,23 @@ var extRange_EnumValueOptions = []proto.ExtensionRange{ func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1578,14 +1996,18 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1594,6 +2016,23 @@ var extRange_ServiceOptions = []proto.ExtensionRange{ func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1620,14 +2059,18 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1636,6 +2079,23 @@ var extRange_MethodOptions = []proto.ExtensionRange{ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1671,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1740,17 +2221,36 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{18, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18, 0} } +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { @@ -1812,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1909,15 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{19, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19, 0} } +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1960,14 +2500,35 @@ func (m *SourceCodeInfo_Location) 
GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1988,16 +2549,35 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{20, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20, 0} } +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { @@ -2037,6 +2617,7 @@ func init() { proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") @@ -2062,162 +2643,164 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_descriptor_9588782fb9cbecd6) } -var fileDescriptorDescriptor = []byte{ - // 2451 bytes of a gzipped FileDescriptorProto +var fileDescriptor_descriptor_9588782fb9cbecd6 = []byte{ + // 2487 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xce, 0x66, 0xe3, 0x28, 0xc9, - 0xc6, 0x49, 0x5a, 0x65, 0xe1, 0x7c, 0xae, 0xb7, 0xd8, 0x56, 0x96, 0x18, 0xaf, 0x52, 0x59, 0x52, - 0x29, 0xb9, 0x9b, 0xec, 0x85, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, 0x83, - 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0xf6, 0xb2, 0x40, 0xaf, 0x05, 0x0a, 0xb4, - 0xf7, 0x5e, 0x0b, 0xf4, 0xde, 0x43, 0x0f, 0x05, 0xda, 0x3f, 0xa3, 0x98, 0x19, 0x92, 0xa2, 0xbe, - 0x12, 0x77, 0x81, 0x64, 0x4f, 0xf6, 0xfc, 0xde, 0xef, 0xbd, 0x79, 0xf3, 0xf8, 0x66, 0xde, 0x9b, - 0x11, 0x20, 
0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0x56, 0x1c, 0xd7, 0xf6, 0x6d, 0xbc, 0x36, - 0xb0, 0xed, 0x81, 0x49, 0xc5, 0xe8, 0x78, 0xdc, 0x2f, 0x1f, 0xc2, 0xfa, 0x43, 0xc3, 0xa4, 0xf5, - 0x88, 0xd8, 0xa5, 0x3e, 0x7e, 0x00, 0xe9, 0xbe, 0x61, 0x52, 0x29, 0xb1, 0x9d, 0xda, 0x29, 0xec, - 0x5e, 0xa9, 0xcc, 0x28, 0x55, 0xa6, 0x35, 0x3a, 0x0c, 0x56, 0xb8, 0x46, 0xf9, 0xdf, 0x69, 0xd8, - 0x58, 0x20, 0xc5, 0x18, 0xd2, 0x16, 0x19, 0x31, 0x8b, 0x89, 0x9d, 0xbc, 0xc2, 0xff, 0xc7, 0x12, - 0xac, 0x38, 0x44, 0x7b, 0x46, 0x06, 0x54, 0x4a, 0x72, 0x38, 0x1c, 0xe2, 0x8f, 0x01, 0x74, 0xea, - 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x22, 0xa5, 0xb6, 0x53, 0x3b, 0x79, 0x25, 0x86, 0xe0, 0x9b, 0xb0, - 0xee, 0x8c, 0x8f, 0x4d, 0x43, 0x53, 0x63, 0x34, 0xd8, 0x4e, 0xed, 0x64, 0x14, 0x24, 0x04, 0xf5, - 0x09, 0xf9, 0x1a, 0xac, 0xbd, 0xa0, 0xe4, 0x59, 0x9c, 0x5a, 0xe0, 0xd4, 0x12, 0x83, 0x63, 0xc4, - 0x1a, 0x14, 0x47, 0xd4, 0xf3, 0xc8, 0x80, 0xaa, 0xfe, 0x89, 0x43, 0xa5, 0x34, 0x5f, 0xfd, 0xf6, - 0xdc, 0xea, 0x67, 0x57, 0x5e, 0x08, 0xb4, 0x7a, 0x27, 0x0e, 0xc5, 0x55, 0xc8, 0x53, 0x6b, 0x3c, - 0x12, 0x16, 0x32, 0x4b, 0xe2, 0x27, 0x5b, 0xe3, 0xd1, 0xac, 0x95, 0x1c, 0x53, 0x0b, 0x4c, 0xac, - 0x78, 0xd4, 0x7d, 0x6e, 0x68, 0x54, 0xca, 0x72, 0x03, 0xd7, 0xe6, 0x0c, 0x74, 0x85, 0x7c, 0xd6, - 0x46, 0xa8, 0x87, 0x6b, 0x90, 0xa7, 0x2f, 0x7d, 0x6a, 0x79, 0x86, 0x6d, 0x49, 0x2b, 0xdc, 0xc8, - 0xd5, 0x05, 0x5f, 0x91, 0x9a, 0xfa, 0xac, 0x89, 0x89, 0x1e, 0xbe, 0x07, 0x2b, 0xb6, 0xe3, 0x1b, - 0xb6, 0xe5, 0x49, 0xb9, 0xed, 0xc4, 0x4e, 0x61, 0xf7, 0xa3, 0x85, 0x89, 0xd0, 0x16, 0x1c, 0x25, - 0x24, 0xe3, 0x06, 0x20, 0xcf, 0x1e, 0xbb, 0x1a, 0x55, 0x35, 0x5b, 0xa7, 0xaa, 0x61, 0xf5, 0x6d, - 0x29, 0xcf, 0x0d, 0x5c, 0x9c, 0x5f, 0x08, 0x27, 0xd6, 0x6c, 0x9d, 0x36, 0xac, 0xbe, 0xad, 0x94, - 0xbc, 0xa9, 0x31, 0x3e, 0x03, 0x59, 0xef, 0xc4, 0xf2, 0xc9, 0x4b, 0xa9, 0xc8, 0x33, 0x24, 0x18, - 0x95, 0xff, 0x92, 0x85, 0xb5, 0xd3, 0xa4, 0xd8, 0xe7, 0x90, 0xe9, 0xb3, 0x55, 0x4a, 0xc9, 0xff, - 0x27, 0x06, 0x42, 0x67, 0x3a, 0x88, 0xd9, 0xef, 0x19, 0xc4, 0x2a, 0x14, 0x2c, 0xea, 0xf9, 0x54, - 0x17, 0x19, 0x91, 0x3a, 0x65, 0x4e, 0x81, 0x50, 0x9a, 0x4f, 0xa9, 0xf4, 0xf7, 0x4a, 0xa9, 0xc7, - 0xb0, 0x16, 0xb9, 0xa4, 0xba, 0xc4, 0x1a, 0x84, 0xb9, 0x79, 0xeb, 0x6d, 0x9e, 0x54, 0xe4, 0x50, - 0x4f, 0x61, 0x6a, 0x4a, 0x89, 0x4e, 0x8d, 0x71, 0x1d, 0xc0, 0xb6, 0xa8, 0xdd, 0x57, 0x75, 0xaa, - 0x99, 0x52, 0x6e, 0x49, 0x94, 0xda, 0x8c, 0x32, 0x17, 0x25, 0x5b, 0xa0, 0x9a, 0x89, 0x3f, 0x9b, - 0xa4, 0xda, 0xca, 0x92, 0x4c, 0x39, 0x14, 0x9b, 0x6c, 0x2e, 0xdb, 0x8e, 0xa0, 0xe4, 0x52, 0x96, - 0xf7, 0x54, 0x0f, 0x56, 0x96, 0xe7, 0x4e, 0x54, 0xde, 0xba, 0x32, 0x25, 0x50, 0x13, 0x0b, 0x5b, - 0x75, 0xe3, 0x43, 0x7c, 0x19, 0x22, 0x40, 0xe5, 0x69, 0x05, 0xfc, 0x14, 0x2a, 0x86, 0x60, 0x8b, - 0x8c, 0xe8, 0xd6, 0x2b, 0x28, 0x4d, 0x87, 0x07, 0x6f, 0x42, 0xc6, 0xf3, 0x89, 0xeb, 0xf3, 0x2c, - 0xcc, 0x28, 0x62, 0x80, 0x11, 0xa4, 0xa8, 0xa5, 0xf3, 0x53, 0x2e, 0xa3, 0xb0, 0x7f, 0xf1, 0xcf, - 0x26, 0x0b, 0x4e, 0xf1, 0x05, 0x7f, 0x32, 0xff, 0x45, 0xa7, 0x2c, 0xcf, 0xae, 0x7b, 0xeb, 0x3e, - 0xac, 0x4e, 0x2d, 0xe0, 0xb4, 0x53, 0x97, 0x7f, 0x05, 0x1f, 0x2e, 0x34, 0x8d, 0x1f, 0xc3, 0xe6, - 0xd8, 0x32, 0x2c, 0x9f, 0xba, 0x8e, 0x4b, 0x59, 0xc6, 0x8a, 0xa9, 0xa4, 0xff, 0xac, 0x2c, 0xc9, - 0xb9, 0xa3, 0x38, 0x5b, 0x58, 0x51, 0x36, 0xc6, 0xf3, 0xe0, 0x8d, 0x7c, 0xee, 0xbf, 0x2b, 0xe8, - 0xf5, 0xeb, 0xd7, 0xaf, 0x93, 0xe5, 0xdf, 0x66, 0x61, 0x73, 0xd1, 0x9e, 0x59, 0xb8, 0x7d, 0xcf, - 0x40, 0xd6, 0x1a, 0x8f, 0x8e, 0xa9, 0xcb, 0x83, 0x94, 0x51, 0x82, 0x11, 0xae, 0x42, 0xc6, 0x24, - 0xc7, 0xd4, 0x94, 0xd2, 0xdb, 0x89, 
0x9d, 0xd2, 0xee, 0xcd, 0x53, 0xed, 0xca, 0x4a, 0x93, 0xa9, - 0x28, 0x42, 0x13, 0x7f, 0x01, 0xe9, 0xe0, 0x88, 0x66, 0x16, 0x6e, 0x9c, 0xce, 0x02, 0xdb, 0x4b, - 0x0a, 0xd7, 0xc3, 0xe7, 0x21, 0xcf, 0xfe, 0x8a, 0xdc, 0xc8, 0x72, 0x9f, 0x73, 0x0c, 0x60, 0x79, - 0x81, 0xb7, 0x20, 0xc7, 0xb7, 0x89, 0x4e, 0xc3, 0xd2, 0x16, 0x8d, 0x59, 0x62, 0xe9, 0xb4, 0x4f, - 0xc6, 0xa6, 0xaf, 0x3e, 0x27, 0xe6, 0x98, 0xf2, 0x84, 0xcf, 0x2b, 0xc5, 0x00, 0xfc, 0x25, 0xc3, - 0xf0, 0x45, 0x28, 0x88, 0x5d, 0x65, 0x58, 0x3a, 0x7d, 0xc9, 0x4f, 0xcf, 0x8c, 0x22, 0x36, 0x5a, - 0x83, 0x21, 0x6c, 0xfa, 0xa7, 0x9e, 0x6d, 0x85, 0xa9, 0xc9, 0xa7, 0x60, 0x00, 0x9f, 0xfe, 0xfe, - 0xec, 0xc1, 0x7d, 0x61, 0xf1, 0xf2, 0x66, 0x73, 0xaa, 0xfc, 0xe7, 0x24, 0xa4, 0xf9, 0x79, 0xb1, - 0x06, 0x85, 0xde, 0x93, 0x8e, 0xac, 0xd6, 0xdb, 0x47, 0xfb, 0x4d, 0x19, 0x25, 0x70, 0x09, 0x80, - 0x03, 0x0f, 0x9b, 0xed, 0x6a, 0x0f, 0x25, 0xa3, 0x71, 0xa3, 0xd5, 0xbb, 0x77, 0x07, 0xa5, 0x22, - 0x85, 0x23, 0x01, 0xa4, 0xe3, 0x84, 0xdb, 0xbb, 0x28, 0x83, 0x11, 0x14, 0x85, 0x81, 0xc6, 0x63, - 0xb9, 0x7e, 0xef, 0x0e, 0xca, 0x4e, 0x23, 0xb7, 0x77, 0xd1, 0x0a, 0x5e, 0x85, 0x3c, 0x47, 0xf6, - 0xdb, 0xed, 0x26, 0xca, 0x45, 0x36, 0xbb, 0x3d, 0xa5, 0xd1, 0x3a, 0x40, 0xf9, 0xc8, 0xe6, 0x81, - 0xd2, 0x3e, 0xea, 0x20, 0x88, 0x2c, 0x1c, 0xca, 0xdd, 0x6e, 0xf5, 0x40, 0x46, 0x85, 0x88, 0xb1, - 0xff, 0xa4, 0x27, 0x77, 0x51, 0x71, 0xca, 0xad, 0xdb, 0xbb, 0x68, 0x35, 0x9a, 0x42, 0x6e, 0x1d, - 0x1d, 0xa2, 0x12, 0x5e, 0x87, 0x55, 0x31, 0x45, 0xe8, 0xc4, 0xda, 0x0c, 0x74, 0xef, 0x0e, 0x42, - 0x13, 0x47, 0x84, 0x95, 0xf5, 0x29, 0xe0, 0xde, 0x1d, 0x84, 0xcb, 0x35, 0xc8, 0xf0, 0xec, 0xc2, - 0x18, 0x4a, 0xcd, 0xea, 0xbe, 0xdc, 0x54, 0xdb, 0x9d, 0x5e, 0xa3, 0xdd, 0xaa, 0x36, 0x51, 0x62, - 0x82, 0x29, 0xf2, 0x2f, 0x8e, 0x1a, 0x8a, 0x5c, 0x47, 0xc9, 0x38, 0xd6, 0x91, 0xab, 0x3d, 0xb9, - 0x8e, 0x52, 0x65, 0x0d, 0x36, 0x17, 0x9d, 0x93, 0x0b, 0x77, 0x46, 0xec, 0x13, 0x27, 0x97, 0x7c, - 0x62, 0x6e, 0x6b, 0xee, 0x13, 0x7f, 0x9b, 0x80, 0x8d, 0x05, 0xb5, 0x62, 0xe1, 0x24, 0x3f, 0x85, - 0x8c, 0x48, 0x51, 0x51, 0x3d, 0xaf, 0x2f, 0x2c, 0x3a, 0x3c, 0x61, 0xe7, 0x2a, 0x28, 0xd7, 0x8b, - 0x77, 0x10, 0xa9, 0x25, 0x1d, 0x04, 0x33, 0x31, 0xe7, 0xe4, 0xaf, 0x13, 0x20, 0x2d, 0xb3, 0xfd, - 0x96, 0x83, 0x22, 0x39, 0x75, 0x50, 0x7c, 0x3e, 0xeb, 0xc0, 0xa5, 0xe5, 0x6b, 0x98, 0xf3, 0xe2, - 0xbb, 0x04, 0x9c, 0x59, 0xdc, 0x68, 0x2d, 0xf4, 0xe1, 0x0b, 0xc8, 0x8e, 0xa8, 0x3f, 0xb4, 0xc3, - 0x66, 0xe3, 0x93, 0x05, 0x25, 0x8c, 0x89, 0x67, 0x63, 0x15, 0x68, 0xc5, 0x6b, 0x60, 0x6a, 0x59, - 0xb7, 0x24, 0xbc, 0x99, 0xf3, 0xf4, 0x37, 0x49, 0xf8, 0x70, 0xa1, 0xf1, 0x85, 0x8e, 0x5e, 0x00, - 0x30, 0x2c, 0x67, 0xec, 0x8b, 0x86, 0x42, 0x9c, 0x4f, 0x79, 0x8e, 0xf0, 0xbd, 0xcf, 0xce, 0x9e, - 0xb1, 0x1f, 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x07, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0x8f, - 0x97, 0xac, 0x74, 0xae, 0x56, 0x7f, 0x0a, 0x48, 0x33, 0x0d, 0x6a, 0xf9, 0xaa, 0xe7, 0xbb, 0x94, - 0x8c, 0x0c, 0x6b, 0xc0, 0x0f, 0xe0, 0xdc, 0x5e, 0xa6, 0x4f, 0x4c, 0x8f, 0x2a, 0x6b, 0x42, 0xdc, - 0x0d, 0xa5, 0x4c, 0x83, 0xd7, 0x38, 0x37, 0xa6, 0x91, 0x9d, 0xd2, 0x10, 0xe2, 0x48, 0xa3, 0xfc, - 0xa7, 0x1c, 0x14, 0x62, 0x6d, 0x29, 0xbe, 0x04, 0xc5, 0xa7, 0xe4, 0x39, 0x51, 0xc3, 0xab, 0x86, - 0x88, 0x44, 0x81, 0x61, 0x9d, 0xe0, 0xba, 0xf1, 0x29, 0x6c, 0x72, 0x8a, 0x3d, 0xf6, 0xa9, 0xab, - 0x6a, 0x26, 0xf1, 0x3c, 0x1e, 0xb4, 0x1c, 0xa7, 0x62, 0x26, 0x6b, 0x33, 0x51, 0x2d, 0x94, 0xe0, - 0xbb, 0xb0, 0xc1, 0x35, 0x46, 0x63, 0xd3, 0x37, 0x1c, 0x93, 0xaa, 0xec, 0xf2, 0xe3, 0xf1, 0x83, - 0x38, 0xf2, 0x6c, 0x9d, 0x31, 0x0e, 0x03, 0x02, 0xf3, 0xc8, 
0xc3, 0x75, 0xb8, 0xc0, 0xd5, 0x06, - 0xd4, 0xa2, 0x2e, 0xf1, 0xa9, 0x4a, 0xbf, 0x19, 0x13, 0xd3, 0x53, 0x89, 0xa5, 0xab, 0x43, 0xe2, - 0x0d, 0xa5, 0x4d, 0x66, 0x60, 0x3f, 0x29, 0x25, 0x94, 0x73, 0x8c, 0x78, 0x10, 0xf0, 0x64, 0x4e, - 0xab, 0x5a, 0xfa, 0x97, 0xc4, 0x1b, 0xe2, 0x3d, 0x38, 0xc3, 0xad, 0x78, 0xbe, 0x6b, 0x58, 0x03, - 0x55, 0x1b, 0x52, 0xed, 0x99, 0x3a, 0xf6, 0xfb, 0x0f, 0xa4, 0xf3, 0xf1, 0xf9, 0xb9, 0x87, 0x5d, - 0xce, 0xa9, 0x31, 0xca, 0x91, 0xdf, 0x7f, 0x80, 0xbb, 0x50, 0x64, 0x1f, 0x63, 0x64, 0xbc, 0xa2, - 0x6a, 0xdf, 0x76, 0x79, 0x65, 0x29, 0x2d, 0xd8, 0xd9, 0xb1, 0x08, 0x56, 0xda, 0x81, 0xc2, 0xa1, - 0xad, 0xd3, 0xbd, 0x4c, 0xb7, 0x23, 0xcb, 0x75, 0xa5, 0x10, 0x5a, 0x79, 0x68, 0xbb, 0x2c, 0xa1, - 0x06, 0x76, 0x14, 0xe0, 0x82, 0x48, 0xa8, 0x81, 0x1d, 0x86, 0xf7, 0x2e, 0x6c, 0x68, 0x9a, 0x58, - 0xb3, 0xa1, 0xa9, 0xc1, 0x15, 0xc5, 0x93, 0xd0, 0x54, 0xb0, 0x34, 0xed, 0x40, 0x10, 0x82, 0x1c, - 0xf7, 0xf0, 0x67, 0xf0, 0xe1, 0x24, 0x58, 0x71, 0xc5, 0xf5, 0xb9, 0x55, 0xce, 0xaa, 0xde, 0x85, - 0x0d, 0xe7, 0x64, 0x5e, 0x11, 0x4f, 0xcd, 0xe8, 0x9c, 0xcc, 0xaa, 0xdd, 0x87, 0x4d, 0x67, 0xe8, - 0xcc, 0xeb, 0x6d, 0xc4, 0xf5, 0xb0, 0x33, 0x74, 0x66, 0x15, 0xaf, 0xf2, 0xfb, 0xaa, 0x4b, 0x35, - 0xe2, 0x53, 0x5d, 0x3a, 0x1b, 0xa7, 0xc7, 0x04, 0xf8, 0x16, 0x20, 0x4d, 0x53, 0xa9, 0x45, 0x8e, - 0x4d, 0xaa, 0x12, 0x97, 0x5a, 0xc4, 0x93, 0x2e, 0xc6, 0xc9, 0x25, 0x4d, 0x93, 0xb9, 0xb4, 0xca, - 0x85, 0xf8, 0x06, 0xac, 0xdb, 0xc7, 0x4f, 0x35, 0x91, 0x92, 0xaa, 0xe3, 0xd2, 0xbe, 0xf1, 0x52, - 0xba, 0xc2, 0xe3, 0xbb, 0xc6, 0x04, 0x3c, 0x21, 0x3b, 0x1c, 0xc6, 0xd7, 0x01, 0x69, 0xde, 0x90, - 0xb8, 0x0e, 0xef, 0x09, 0x3c, 0x87, 0x68, 0x54, 0xba, 0x2a, 0xa8, 0x02, 0x6f, 0x85, 0x30, 0xdb, - 0x12, 0xde, 0x0b, 0xa3, 0xef, 0x87, 0x16, 0xaf, 0x89, 0x2d, 0xc1, 0xb1, 0xc0, 0xda, 0x0e, 0x20, - 0x16, 0x8a, 0xa9, 0x89, 0x77, 0x38, 0xad, 0xe4, 0x0c, 0x9d, 0xf8, 0xbc, 0x97, 0x61, 0x95, 0x31, - 0x27, 0x93, 0x5e, 0x17, 0xfd, 0x8c, 0x33, 0x8c, 0xcd, 0xf8, 0xce, 0x5a, 0xcb, 0xf2, 0x1e, 0x14, - 0xe3, 0xf9, 0x89, 0xf3, 0x20, 0x32, 0x14, 0x25, 0x58, 0xad, 0xaf, 0xb5, 0xeb, 0xac, 0x4a, 0x7f, - 0x2d, 0xa3, 0x24, 0xeb, 0x16, 0x9a, 0x8d, 0x9e, 0xac, 0x2a, 0x47, 0xad, 0x5e, 0xe3, 0x50, 0x46, - 0xa9, 0x78, 0x5b, 0xfa, 0xb7, 0x24, 0x94, 0xa6, 0x6f, 0x18, 0xf8, 0x27, 0x70, 0x36, 0x7c, 0x0e, - 0xf0, 0xa8, 0xaf, 0xbe, 0x30, 0x5c, 0xbe, 0x65, 0x46, 0x44, 0x74, 0xd8, 0xd1, 0x47, 0xdb, 0x0c, - 0x58, 0x5d, 0xea, 0x7f, 0x65, 0xb8, 0x6c, 0x43, 0x8c, 0x88, 0x8f, 0x9b, 0x70, 0xd1, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3c, 0xc4, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xa2, - 0x54, 0x45, 0x56, 0x3e, 0xb2, 0xec, 0x6e, 0x40, 0x9e, 0x9c, 0xe1, 0xd5, 0x80, 0x3a, 0x93, 0x60, - 0xa9, 0x65, 0x09, 0x76, 0x1e, 0xf2, 0x23, 0xe2, 0xa8, 0xd4, 0xf2, 0xdd, 0x13, 0xde, 0x57, 0xe6, - 0x94, 0xdc, 0x88, 0x38, 0x32, 0x1b, 0xbf, 0x9f, 0xf6, 0xfe, 0x9f, 0x29, 0x28, 0xc6, 0x7b, 0x4b, - 0xd6, 0xaa, 0x6b, 0xbc, 0x8e, 0x24, 0xf8, 0x49, 0x73, 0xf9, 0x8d, 0x9d, 0x68, 0xa5, 0xc6, 0x0a, - 0xcc, 0x5e, 0x56, 0x74, 0x7c, 0x8a, 0xd0, 0x64, 0xc5, 0x9d, 0x9d, 0x2d, 0x54, 0xdc, 0x62, 0x72, - 0x4a, 0x30, 0xc2, 0x07, 0x90, 0x7d, 0xea, 0x71, 0xdb, 0x59, 0x6e, 0xfb, 0xca, 0x9b, 0x6d, 0x3f, - 0xea, 0x72, 0xe3, 0xf9, 0x47, 0x5d, 0xb5, 0xd5, 0x56, 0x0e, 0xab, 0x4d, 0x25, 0x50, 0xc7, 0xe7, - 0x20, 0x6d, 0x92, 0x57, 0x27, 0xd3, 0xa5, 0x88, 0x43, 0xa7, 0x0d, 0xfc, 0x39, 0x48, 0xbf, 0xa0, - 0xe4, 0xd9, 0x74, 0x01, 0xe0, 0xd0, 0x3b, 0x4c, 0xfd, 0x5b, 0x90, 0xe1, 0xf1, 0xc2, 0x00, 0x41, - 0xc4, 0xd0, 0x07, 0x38, 0x07, 0xe9, 0x5a, 0x5b, 0x61, 0xe9, 0x8f, 0xa0, 0x28, 0x50, 
0xb5, 0xd3, - 0x90, 0x6b, 0x32, 0x4a, 0x96, 0xef, 0x42, 0x56, 0x04, 0x81, 0x6d, 0x8d, 0x28, 0x0c, 0xe8, 0x83, - 0x60, 0x18, 0xd8, 0x48, 0x84, 0xd2, 0xa3, 0xc3, 0x7d, 0x59, 0x41, 0xc9, 0xf8, 0xe7, 0xf5, 0xa0, - 0x18, 0x6f, 0x2b, 0xdf, 0x4f, 0x4e, 0xfd, 0x35, 0x01, 0x85, 0x58, 0x9b, 0xc8, 0x1a, 0x14, 0x62, - 0x9a, 0xf6, 0x0b, 0x95, 0x98, 0x06, 0xf1, 0x82, 0xa4, 0x00, 0x0e, 0x55, 0x19, 0x72, 0xda, 0x8f, - 0xf6, 0x5e, 0x9c, 0xff, 0x43, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x07, 0x75, 0xf0, - 0xf7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x41, 0xdd, 0xfb, 0x57, 0x12, 0x56, - 0xa7, 0xba, 0xc9, 0xd3, 0x7a, 0xf7, 0x0d, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, - 0x44, 0x35, 0xe9, 0x73, 0x6a, 0x4a, 0x65, 0x7e, 0x50, 0xdc, 0x7a, 0x73, 0xbf, 0x5a, 0x69, 0x4c, - 0xf4, 0x9a, 0x4c, 0x6d, 0x6f, 0xa3, 0x51, 0x97, 0x0f, 0x3b, 0xed, 0x9e, 0xdc, 0xaa, 0x3d, 0x51, - 0x8f, 0x5a, 0x3f, 0x6f, 0xb5, 0xbf, 0x6a, 0x29, 0xc8, 0x98, 0xa1, 0xbd, 0xc3, 0xad, 0xde, 0x01, - 0x34, 0xeb, 0x14, 0x3e, 0x0b, 0x8b, 0xdc, 0x42, 0x1f, 0xe0, 0x0d, 0x58, 0x6b, 0xb5, 0xd5, 0x6e, - 0xa3, 0x2e, 0xab, 0xf2, 0xc3, 0x87, 0x72, 0xad, 0xd7, 0x15, 0x17, 0xf8, 0x88, 0xdd, 0x9b, 0xde, - 0xd4, 0xbf, 0x4b, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0x8f, 0x4f, - 0xe3, 0x7d, 0x85, 0x95, 0xfc, 0x0e, 0x71, 0xfd, 0xe0, 0xaa, 0x71, 0x1d, 0x58, 0x94, 0x2c, 0xdf, - 0xe8, 0x1b, 0xd4, 0x0d, 0xde, 0x3b, 0xc4, 0x85, 0x62, 0x6d, 0x82, 0x8b, 0x27, 0x8f, 0x1f, 0x01, - 0x76, 0x6c, 0xcf, 0xf0, 0x8d, 0xe7, 0x54, 0x35, 0xac, 0xf0, 0x71, 0x84, 0x5d, 0x30, 0xd2, 0x0a, - 0x0a, 0x25, 0x0d, 0xcb, 0x8f, 0xd8, 0x16, 0x1d, 0x90, 0x19, 0x36, 0x3b, 0xc0, 0x53, 0x0a, 0x0a, - 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xf5, 0x22, 0xa1, 0x14, - 0x04, 0x16, 0x51, 0x82, 0x7e, 0x7a, 0xf2, 0x2a, 0x53, 0x54, 0x0a, 0x02, 0x13, 0x94, 0x6b, 0xb0, - 0x46, 0x06, 0x03, 0x97, 0x19, 0x0f, 0x0d, 0x89, 0x1b, 0x42, 0x29, 0x82, 0x39, 0x71, 0xeb, 0x11, - 0xe4, 0xc2, 0x38, 0xb0, 0x92, 0xcc, 0x22, 0xa1, 0x3a, 0xe2, 0x65, 0x2e, 0xb9, 0x93, 0x57, 0x72, - 0x56, 0x28, 0xbc, 0x04, 0x45, 0xc3, 0x53, 0x27, 0x8f, 0xcc, 0xc9, 0xed, 0xe4, 0x4e, 0x4e, 0x29, - 0x18, 0x5e, 0xf4, 0x40, 0x57, 0xfe, 0x2e, 0x09, 0xa5, 0xe9, 0x47, 0x72, 0x5c, 0x87, 0x9c, 0x69, - 0x6b, 0x84, 0xa7, 0x96, 0xf8, 0x85, 0x66, 0xe7, 0x2d, 0xef, 0xea, 0x95, 0x66, 0xc0, 0x57, 0x22, - 0xcd, 0xad, 0xbf, 0x27, 0x20, 0x17, 0xc2, 0xf8, 0x0c, 0xa4, 0x1d, 0xe2, 0x0f, 0xb9, 0xb9, 0xcc, - 0x7e, 0x12, 0x25, 0x14, 0x3e, 0x66, 0xb8, 0xe7, 0x10, 0x8b, 0xa7, 0x40, 0x80, 0xb3, 0x31, 0xfb, - 0xae, 0x26, 0x25, 0x3a, 0xbf, 0x7e, 0xd8, 0xa3, 0x11, 0xb5, 0x7c, 0x2f, 0xfc, 0xae, 0x01, 0x5e, - 0x0b, 0x60, 0x7c, 0x13, 0xd6, 0x7d, 0x97, 0x18, 0xe6, 0x14, 0x37, 0xcd, 0xb9, 0x28, 0x14, 0x44, - 0xe4, 0x3d, 0x38, 0x17, 0xda, 0xd5, 0xa9, 0x4f, 0xb4, 0x21, 0xd5, 0x27, 0x4a, 0x59, 0xfe, 0x02, - 0x7b, 0x36, 0x20, 0xd4, 0x03, 0x79, 0xa8, 0x5b, 0xfe, 0x47, 0x02, 0xd6, 0xc3, 0x0b, 0x93, 0x1e, - 0x05, 0xeb, 0x10, 0x80, 0x58, 0x96, 0xed, 0xc7, 0xc3, 0x35, 0x9f, 0xca, 0x73, 0x7a, 0x95, 0x6a, - 0xa4, 0xa4, 0xc4, 0x0c, 0x6c, 0x8d, 0x00, 0x26, 0x92, 0xa5, 0x61, 0xbb, 0x08, 0x85, 0xe0, 0x17, - 0x10, 0xfe, 0x33, 0x9a, 0xb8, 0x62, 0x83, 0x80, 0xd8, 0xcd, 0x0a, 0x6f, 0x42, 0xe6, 0x98, 0x0e, - 0x0c, 0x2b, 0x78, 0xd7, 0x14, 0x83, 0xf0, 0xad, 0x36, 0x1d, 0xbd, 0xd5, 0xee, 0x3f, 0x86, 0x0d, - 0xcd, 0x1e, 0xcd, 0xba, 0xbb, 0x8f, 0x66, 0xae, 0xf9, 0xde, 0x97, 0x89, 0xaf, 0x61, 0xd2, 0x62, - 0x7e, 0x9b, 0x4c, 0x1d, 0x74, 0xf6, 0xff, 0x98, 0xdc, 0x3a, 0x10, 0x7a, 0x9d, 0x70, 0x99, 0x0a, - 0xed, 0x9b, 
0x54, 0x63, 0xae, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x03, 0xf1, 0x99, 0x1b, - 0x1c, 0x00, 0x00, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, + 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05, + 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90, + 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f, + 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf, + 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33, + 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9, + 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff, + 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8, + 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e, + 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10, + 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82, + 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62, + 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6, + 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79, + 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a, + 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae, + 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69, + 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62, + 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d, + 0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a, + 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea, + 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c, + 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52, + 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8, + 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a, + 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52, + 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f, + 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26, + 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92, + 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d, + 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18, + 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae, + 0xcf, 0xb3, 0x30, 
0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff, + 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b, + 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f, + 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3, + 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff, + 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1, + 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a, + 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b, + 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05, + 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31, + 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b, + 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0, + 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a, + 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0, + 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34, + 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02, + 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b, + 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c, + 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2, + 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23, + 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15, + 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13, + 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b, + 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3, + 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda, + 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae, + 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27, + 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 0xe1, 0x47, + 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82, + 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35, + 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf, + 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9, + 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01, + 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa, + 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f, + 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0, + 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 
0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f, + 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc, + 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86, + 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52, + 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07, + 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51, + 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, + 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c, + 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c, + 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51, + 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2, + 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d, + 0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d, + 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53, + 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf, + 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7, + 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec, + 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43, + 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1, + 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce, + 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf, + 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e, + 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, + 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88, + 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f, + 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7, + 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2, + 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42, + 0x31, 0xf5, 0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27, + 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c, + 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97, + 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94, + 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c, + 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0, + 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a, + 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47, + 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 
0x06, 0xd4, 0x99, 0x04, 0x4b, + 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e, + 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc, + 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07, + 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39, + 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf, + 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17, + 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a, + 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04, + 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e, + 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7, + 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41, + 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4, + 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b, + 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0, + 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, + 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26, + 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa, + 0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00, + 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7, + 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf, + 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe, + 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03, + 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15, + 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29, + 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61, + 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d, + 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4, + 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52, + 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3, + 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12, + 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65, + 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9, + 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 
0xbc, + 0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88, + 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99, + 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d, + 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5, + 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f, + 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d, + 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c, + 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16, + 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5, + 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, + 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index be534f0fa1..ec6eb168d6 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -1,35 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ package descriptor import fmt "fmt" @@ -270,7 +241,7 @@ func (this *EnumDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 9) s = append(s, "&descriptor.EnumDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") @@ -281,6 +252,30 @@ func (this *EnumDescriptorProto) GoString() string { if this.Options != nil { s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } if this.XXX_unrecognized != nil { s = append(s, 
"XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go deleted file mode 100644 index d4248b4833..0000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package descriptor_test - -import ( - "fmt" - "testing" - - tpb "github.com/gogo/protobuf/proto/testdata" - "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -) - -func TestMessage(t *testing.T) { - var msg *descriptor.DescriptorProto - fd, md := descriptor.ForMessage(msg) - if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want { - t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want) - } - if name, want := md.GetName(), "DescriptorProto"; name != want { - t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want) - } -} - -func Example_Options() { - var msg *tpb.MyMessageSet - _, md := descriptor.ForMessage(msg) - if md.GetOptions().GetMessageSetWireFormat() { - fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName()) - } - - // Output: - // MyMessageSet uses option message_set_wire_format. -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go deleted file mode 100644 index 15c7cf43c2..0000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - A plugin for the Google protocol buffer compiler to generate Go code. 
- Run it by building this program and putting it in your path with the name - protoc-gen-gogo - That word 'gogo' at the end becomes part of the option string set for the - protocol compiler, so once the protocol compiler (protoc) is installed - you can run - protoc --gogo_out=output_directory input_directory/file.proto - to generate Go bindings for the protocol defined by file.proto. - With that input, the output will be written to - output_directory/go_package/file.pb.go - - The generated code is documented in the package comment for - the library. - - See the README and documentation for protocol buffers to learn more: - https://developers.google.com/protocol-buffers/ - -*/ -package documentation diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go deleted file mode 100644 index dd8e795030..0000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go +++ /dev/null @@ -1,57 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate -// Go code. Run it by building this program and putting it in your path with -// the name -// protoc-gen-gogo -// That word 'gogo' at the end becomes part of the option string set for the -// protocol compiler, so once the protocol compiler (protoc) is installed -// you can run -// protoc --gogo_out=output_directory input_directory/file.proto -// to generate Go bindings for the protocol defined by file.proto. -// With that input, the output will be written to -// output_directory/file.pb.go -// -// The generated code is documented in the package comment for -// the library. 
-// -// See the README and documentation for protocol buffers to learn more: -// https://developers.google.com/protocol-buffers/ -package main - -import ( - "github.com/gogo/protobuf/vanity/command" -) - -func main() { - command.Write(command.Generate(command.Read())) -} diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb689..0000000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7afdca..0000000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
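Since this diff removes the vendored glog sources, a minimal usage sketch may help readers of the deletion. It only restates the examples from the README above; the program shape and the canonical github.com/golang/glog import path are assumptions, and the flag.Parse/Flush placement follows the deleted package documentation.

package main

import (
	"flag"

	"github.com/golang/glog" // assumed upstream path; this repo vendors a mirror
)

func main() {
	flag.Parse()       // the package reads its flags, so parse before logging
	defer glog.Flush() // output is buffered; flush before exit

	glog.Info("Prepare to repel boarders")

	// The guarded form skips evaluating arguments when V logging is off.
	if glog.V(2) {
		glog.Info("Starting transaction...")
	}
	glog.V(2).Infoln("Processed", 42, "elements")
}
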
-// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
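The severity type being deleted here illustrates a reusable pattern: an atomically loaded int32 that satisfies flag.Value and accepts either a level name or a number. A rough, self-contained sketch of that pattern (all names hypothetical, not the vendored code):

package main

import (
	"flag"
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
)

type threshold int32 // stored as a sync/atomic int32, like severity above

var names = []string{"INFO", "WARNING", "ERROR", "FATAL"}

func (t *threshold) get() int32     { return atomic.LoadInt32((*int32)(t)) }
func (t *threshold) String() string { return strconv.Itoa(int(t.get())) }

// Set accepts "ERROR" as well as "2", mirroring severity.Set.
func (t *threshold) Set(value string) error {
	for i, n := range names {
		if strings.EqualFold(n, value) {
			atomic.StoreInt32((*int32)(t), int32(i))
			return nil
		}
	}
	v, err := strconv.Atoi(value)
	if err != nil {
		return fmt.Errorf("unknown threshold %q", value)
	}
	atomic.StoreInt32((*int32)(t), int32(v))
	return nil
}

var stderrThreshold threshold

func main() {
	flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
	flag.Parse()
	fmt.Println("threshold =", stderrThreshold.get())
}
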
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
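The -vmodule machinery above boils down to parsing a comma-separated pattern=N list and matching file names with filepath.Match, falling back to plain string comparison for literal patterns. A condensed sketch under those assumptions, with simplified names:

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
)

type pat struct {
	pattern string
	literal bool // no metacharacters, so compare directly
	level   int
}

func parseVmodule(spec string) ([]pat, error) {
	var filter []pat
	for _, p := range strings.Split(spec, ",") {
		if p == "" {
			continue // tolerate trailing commas, as the deleted code does
		}
		kv := strings.SplitN(p, "=", 2)
		if len(kv) != 2 || kv[0] == "" || kv[1] == "" {
			return nil, fmt.Errorf("expect comma-separated list of filename=N")
		}
		n, err := strconv.Atoi(kv[1])
		if err != nil || n < 0 {
			return nil, fmt.Errorf("bad level in %q", p)
		}
		literal := !strings.ContainsAny(kv[0], `\*?[]`)
		filter = append(filter, pat{kv[0], literal, n})
	}
	return filter, nil
}

// levelFor returns the V level for a file (without its .go suffix).
func levelFor(filter []pat, file string) int {
	for _, f := range filter {
		if f.literal && file == f.pattern {
			return f.level
		}
		if ok, _ := filepath.Match(f.pattern, file); !f.literal && ok {
			return f.level
		}
	}
	return 0
}

func main() {
	filter, _ := parseVmodule("recordio=2,gfs*=3")
	fmt.Println(levelFor(filter, "gfs_client")) // 3
}
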
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. 
- // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. 
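For comparison with the hand-rolled digit routines above, the documented header layout, Lmmdd hh:mm:ss.uuuuuu threadid file:line], can also be produced with fmt; the deleted code avoids Fprintf purely for speed. A hypothetical stand-alone version:

package main

import (
	"fmt"
	"os"
	"time"
)

// header renders the same fields the deleted formatHeader writes by hand:
// severity char, zero-padded month/day, clock time with microseconds,
// space-padded pid (standing in for the TID), then file:line].
func header(sev byte, t time.Time, file string, line int) string {
	_, month, day := t.Date()
	hour, min, sec := t.Clock()
	return fmt.Sprintf("%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] ",
		sev, int(month), day, hour, min, sec, t.Nanosecond()/1000, os.Getpid(), file, line)
}

func main() {
	fmt.Println(header('I', time.Now(), "glog.go", 42) + "hello")
}
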
-func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. 
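The switch with fallthrough in output() realizes the rule stated earlier: a message written to a high-severity log is also written to every lower-severity log. Reduced to its core, with slices standing in for the per-severity log files:

package main

import "fmt"

const (
	infoLog = iota
	warningLog
	errorLog
	fatalLog
	numSeverity
)

var files [numSeverity][]string // stand-ins for the per-severity files

// write appends msg at severity s and, via fallthrough, at each lower one.
func write(s int, msg string) {
	switch s {
	case fatalLog:
		files[fatalLog] = append(files[fatalLog], msg)
		fallthrough
	case errorLog:
		files[errorLog] = append(files[errorLog], msg)
		fallthrough
	case warningLog:
		files[warningLog] = append(files[warningLog], msg)
		fallthrough
	case infoLog:
		files[infoLog] = append(files[infoLog], msg)
	}
}

func main() {
	write(errorLog, "boom")
	// An ERROR record lands in ERROR, WARNING, and INFO, but not FATAL.
	fmt.Println(len(files[infoLog]), len(files[warningLog]), len(files[errorLog]), len(files[fatalLog])) // 1 1 1 0
}
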
- for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. 
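timeoutFlush above is a small, general pattern worth isolating: run a possibly deadlocking flush in a goroutine and abandon the wait after a deadline. A self-contained sketch:

package main

import (
	"fmt"
	"os"
	"time"
)

// flushWithTimeout returns when flush completes or timeout elapses,
// whichever happens first.
func flushWithTimeout(flush func(), timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		flush()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		fmt.Fprintln(os.Stderr, "flush took longer than", timeout)
	}
}

func main() {
	flushWithTimeout(func() { time.Sleep(50 * time.Millisecond) }, time.Second)
}
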
- var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". 
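CopyStandardLogTo and logBridge route the stdlib logger through this package by re-parsing Lshortfile output. A stripped-down sketch of the same bridging idea, printing instead of logging and with hypothetical names:

package main

import (
	"bytes"
	"fmt"
	stdLog "log"
	"strconv"
)

type bridge struct{}

// Write splits "file.go:23: message" into its components, as the deleted
// logBridge.Write does, then emits a glog-style line.
func (bridge) Write(b []byte) (int, error) {
	file, line, text := "???", 1, ""
	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) == 3 && len(parts[0]) > 0 && len(parts[2]) > 1 {
		file = string(parts[0])
		if n, err := strconv.Atoi(string(parts[1])); err == nil {
			line = n
		}
		text = string(parts[2][1:]) // skip the leading space
	}
	fmt.Printf("I %s:%d] %s", file, line, text)
	return len(b), nil
}

func main() {
	stdLog.SetFlags(stdLog.Lshortfile) // makes the logger emit "file.go:23: message"
	stdLog.SetOutput(bridge{})
	stdLog.Print("routed through the bridge")
}
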
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. 
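The Verbose bool returned by V is what makes both call forms in the package documentation work. In miniature, with an illustrative package-level verbosity in place of the real -v flag:

package main

import (
	"fmt"
	"sync/atomic"
)

var verbosity int32 // set via a -v flag in the real package

type Verbose bool

// V's fast path is a single atomic load and compare.
func V(level int32) Verbose {
	return Verbose(atomic.LoadInt32(&verbosity) >= level)
}

func (v Verbose) Info(args ...interface{}) {
	if v {
		fmt.Println(args...)
	}
}

func main() {
	atomic.StoreInt32(&verbosity, 2)
	V(2).Info("logged")     // prints
	V(3).Info("suppressed") // no-op
	if V(2) {
		V(2).Info("guarded form; arguments are not built when logging is off")
	}
}
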
-func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). 
-func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d2811..0000000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. 
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/golang/glog/glog_test.go b/vendor/github.com/golang/glog/glog_test.go deleted file mode 100644 index 0fb376e1fd..0000000000 --- a/vendor/github.com/golang/glog/glog_test.go +++ /dev/null @@ -1,415 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package glog - -import ( - "bytes" - "fmt" - stdLog "log" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -// Test that shortHostname works as advertised. 
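logName above encodes program, host, user, tag, timestamp, and pid into the file name and pairs it with a stable symlink name. A sketch reproducing the documented format with hypothetical inputs:

package main

import (
	"fmt"
	"time"
)

// logName mirrors the deleted function's format:
// program.host.user.log.TAG.yyyymmdd-hhmmss.pid, plus "program.TAG"
// as the symlink that always points at the newest file for that tag.
func logName(program, host, user, tag string, t time.Time, pid int) (name, link string) {
	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program, host, user, tag,
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), pid)
	return name, program + "." + tag
}

func main() {
	name, link := logName("myprog", "host1", "alice", "INFO", time.Now(), 1234)
	fmt.Println(name, "->", link)
}
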
-func TestShortHostname(t *testing.T) { - for hostname, expect := range map[string]string{ - "": "", - "host": "host", - "host.google.com": "host", - } { - if got := shortHostname(hostname); expect != got { - t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) - } - } -} - -// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. -type flushBuffer struct { - bytes.Buffer -} - -func (f *flushBuffer) Flush() error { - return nil -} - -func (f *flushBuffer) Sync() error { - return nil -} - -// swap sets the log writers and returns the old array. -func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { - l.mu.Lock() - defer l.mu.Unlock() - old = l.file - for i, w := range writers { - logging.file[i] = w - } - return -} - -// newBuffers sets the log writers to all new byte buffers and returns the old array. -func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { - return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) -} - -// contents returns the specified log value as a string. -func contents(s severity) string { - return logging.file[s].(*flushBuffer).String() -} - -// contains reports whether the string is contained in the log. -func contains(s severity, str string, t *testing.T) bool { - return strings.Contains(contents(s), str) -} - -// setFlags configures the logging flags how the test expects them. -func setFlags() { - logging.toStderr = false -} - -// Test that Info works as advertised. -func TestInfo(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -func TestInfoDepth(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - - f := func() { InfoDepth(1, "depth-test1") } - - // The next three lines must stay together - _, _, wantLine, _ := runtime.Caller(0) - InfoDepth(0, "depth-test0") - f() - - msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") - if len(msgs) != 2 { - t.Fatalf("Got %d lines, expected 2", len(msgs)) - } - - for i, m := range msgs { - if !strings.HasPrefix(m, "I") { - t.Errorf("InfoDepth[%d] has wrong character: %q", i, m) - } - w := fmt.Sprintf("depth-test%d", i) - if !strings.Contains(m, w) { - t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m) - } - - // pull out the line number (between : and ]) - msg := m[strings.LastIndex(m, ":")+1:] - x := strings.Index(msg, "]") - if x < 0 { - t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) - continue - } - line, err := strconv.Atoi(msg[:x]) - if err != nil { - t.Errorf("InfoDepth[%d]: bad line number: %q", i, m) - continue - } - wantLine++ - if wantLine != line { - t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine) - } - } -} - -func init() { - CopyStandardLogTo("INFO") -} - -// Test that CopyStandardLogTo panics on bad input. -func TestCopyStandardLogToPanic(t *testing.T) { - defer func() { - if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") { - t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s) - } - }() - CopyStandardLogTo("LOG") -} - -// Test that using the standard log package logs to INFO. 
-func TestStandardLog(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - stdLog.Print("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that the header has the correct format. -func TestHeader(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer func(previous func() time.Time) { timeNow = previous }(timeNow) - timeNow = func() time.Time { - return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) - } - pid = 1234 - Info("test") - var line int - format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n" - n, err := fmt.Sscanf(contents(infoLog), format, &line) - if n != 1 || err != nil { - t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) - } - // Scanf treats multiple spaces as equivalent to a single space, - // so check for correct space-padding also. - want := fmt.Sprintf(format, line) - if contents(infoLog) != want { - t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) - } -} - -// Test that an Error log goes to Warning and Info. -// Even in the Info log, the source character will be E, so the data should -// all be identical. -func TestError(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Error("test") - if !contains(errorLog, "E", t) { - t.Errorf("Error has wrong character: %q", contents(errorLog)) - } - if !contains(errorLog, "test", t) { - t.Error("Error failed") - } - str := contents(errorLog) - if !contains(warningLog, str, t) { - t.Error("Warning failed") - } - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a Warning log goes to Info. -// Even in the Info log, the source character will be W, so the data should -// all be identical. -func TestWarning(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Warning("test") - if !contains(warningLog, "W", t) { - t.Errorf("Warning has wrong character: %q", contents(warningLog)) - } - if !contains(warningLog, "test", t) { - t.Error("Warning failed") - } - str := contents(warningLog) - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a V log goes to Info. -func TestV(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.verbosity.Set("2") - defer logging.verbosity.Set("0") - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule enables a log in this file. -func TestVmoduleOn(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("glog_test=2") - defer logging.vmodule.Set("") - if !V(1) { - t.Error("V not enabled for 1") - } - if !V(2) { - t.Error("V not enabled for 2") - } - if V(3) { - t.Error("V enabled for 3") - } - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule of another file does not enable a log in this file. 
-func TestVmoduleOff(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("notthisfile=2") - defer logging.vmodule.Set("") - for i := 1; i <= 3; i++ { - if V(Level(i)) { - t.Errorf("V enabled for %d", i) - } - } - V(2).Info("test") - if contents(infoLog) != "" { - t.Error("V logged incorrectly") - } -} - -// vGlobs are patterns that match/don't match this file at V=2. -var vGlobs = map[string]bool{ - // Easy to test the numeric match here. - "glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. - "glog_test=2": true, - "glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. - // These all use 2 and check the patterns. All are true. - "*=2": true, - "?l*=2": true, - "????_*=2": true, - "??[mno]?_*t=2": true, - // These all use 2 and check the patterns. All are false. - "*x=2": false, - "m*=2": false, - "??_*=2": false, - "?[abc]?_*t=2": false, -} - -// Test that vmodule globbing works as advertised. -func testVmoduleGlob(pat string, match bool, t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer logging.vmodule.Set("") - logging.vmodule.Set(pat) - if V(2) != Verbose(match) { - t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) - } -} - -// Test that a vmodule globbing works as advertised. -func TestVmoduleGlob(t *testing.T) { - for glob, match := range vGlobs { - testVmoduleGlob(glob, match, t) - } -} - -func TestRollover(t *testing.T) { - setFlags() - var err error - defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) - logExitFunc = func(e error) { - err = e - } - defer func(previous uint64) { MaxSize = previous }(MaxSize) - MaxSize = 512 - - Info("x") // Be sure we have a file. - info, ok := logging.file[infoLog].(*syncBuffer) - if !ok { - t.Fatal("info wasn't created") - } - if err != nil { - t.Fatalf("info has initial error: %v", err) - } - fname0 := info.file.Name() - Info(strings.Repeat("x", int(MaxSize))) // force a rollover - if err != nil { - t.Fatalf("info has error after big write: %v", err) - } - - // Make sure the next log file gets a file name with a different - // time stamp. - // - // TODO: determine whether we need to support subsecond log - // rotation. C++ does not appear to handle this case (nor does it - // handle Daylight Savings Time properly). - time.Sleep(1 * time.Second) - - Info("x") // create a new file - if err != nil { - t.Fatalf("error after rotation: %v", err) - } - fname1 := info.file.Name() - if fname0 == fname1 { - t.Errorf("info.f.Name did not change: %v", fname0) - } - if info.nbytes >= MaxSize { - t.Errorf("file size was not reset: %d", info.nbytes) - } -} - -func TestLogBacktraceAt(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - // The peculiar style of this code simplifies line counting and maintenance of the - // tracing block below. - var infoLine string - setTraceLocation := func(file string, line int, ok bool, delta int) { - if !ok { - t.Fatal("could not get file:line") - } - _, file = filepath.Split(file) - infoLine = fmt.Sprintf("%s:%d", file, line+delta) - err := logging.traceLocation.Set(infoLine) - if err != nil { - t.Fatal("error setting log_backtrace_at: ", err) - } - } - { - // Start of tracing block. These lines know about each other's relative position. - _, file, line, ok := runtime.Caller(0) - setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. 
- Info("we want a stack trace here") - } - numAppearances := strings.Count(contents(infoLog), infoLine) - if numAppearances < 2 { - // Need 2 appearances, one in the log header and one in the trace: - // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here - // ... - // github.com/glog/glog_test.go:280 (0x41ba91) - // ... - // We could be more precise but that would require knowing the details - // of the traceback format, which may not be dependable. - t.Fatal("got no trace back; log is ", contents(infoLog)) - } -} - -func BenchmarkHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - buf, _, _ := logging.header(infoLog, 0) - logging.putBuffer(buf) - } -} diff --git a/vendor/github.com/golang/groupcache/.gitignore b/vendor/github.com/golang/groupcache/.gitignore deleted file mode 100644 index b25c15b81f..0000000000 --- a/vendor/github.com/golang/groupcache/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*~ diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md deleted file mode 100644 index 70c29da160..0000000000 --- a/vendor/github.com/golang/groupcache/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# groupcache - -## Summary - -groupcache is a caching and cache-filling library, intended as a -replacement for memcached in many cases. - -For API docs and examples, see http://godoc.org/github.com/golang/groupcache - -## Comparison to memcached - -### **Like memcached**, groupcache: - - * shards by key to select which peer is responsible for that key - -### **Unlike memcached**, groupcache: - - * does not require running a separate set of servers, thus massively - reducing deployment/configuration pain. groupcache is a client - library as well as a server. It connects to its own peers. - - * comes with a cache filling mechanism. Whereas memcached just says - "Sorry, cache miss", often resulting in a thundering herd of - database (or whatever) loads from an unbounded number of clients - (which has resulted in several fun outages), groupcache coordinates - cache fills such that only one load in one process of an entire - replicated set of processes populates the cache, then multiplexes - the loaded value to all callers. - - * does not support versioned values. If key "foo" is value "bar", - key "foo" must always be "bar". There are neither cache expiration - times, nor explicit cache evictions. Thus there is also no CAS, - nor Increment/Decrement. This also means that groupcache.... - - * ... supports automatic mirroring of super-hot items to multiple - processes. This prevents memcached hot spotting where a machine's - CPU and/or NIC are overloaded by very popular keys/values. - - * is currently only available for Go. It's very unlikely that I - (bradfitz@) will port the code to any other language. - -## Loading process - -In a nutshell, a groupcache lookup of **Get("foo")** looks like: - -(On machine #5 of a set of N machines running the same code) - - 1. Is the value of "foo" in local memory because it's super hot? If so, use it. - - 2. Is the value of "foo" in local memory because peer #5 (the current - peer) is the owner of it? If so, use it. - - 3. Amongst all the peers in my set of N, am I the owner of the key - "foo"? (e.g. does it consistent hash to 5?) If so, load it. If - other callers come in, via the same process or via RPC requests - from peers, they block waiting for the load to finish and get the - same answer. 
If not, RPC to the peer that's the owner and get - the answer. If the RPC fails, just load it locally (still with - local dup suppression). - -## Users - -groupcache is in production use by dl.google.com (its original user), -parts of Blogger, parts of Google Code, parts of Google Fiber, parts -of Google production monitoring systems, etc. - -## Presentations - -See http://talks.golang.org/2013/oscon-dl.slide - -## Help - -Use the golang-nuts mailing list for any discussion or questions. diff --git a/vendor/github.com/golang/groupcache/byteview.go b/vendor/github.com/golang/groupcache/byteview.go deleted file mode 100644 index a2c2c493de..0000000000 --- a/vendor/github.com/golang/groupcache/byteview.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2012 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package groupcache - -import ( - "bytes" - "errors" - "io" - "strings" -) - -// A ByteView holds an immutable view of bytes. -// Internally it wraps either a []byte or a string, -// but that detail is invisible to callers. -// -// A ByteView is meant to be used as a value type, not -// a pointer (like a time.Time). -type ByteView struct { - // If b is non-nil, b is used, else s is used. - b []byte - s string -} - -// Len returns the view's length. -func (v ByteView) Len() int { - if v.b != nil { - return len(v.b) - } - return len(v.s) -} - -// ByteSlice returns a copy of the data as a byte slice. -func (v ByteView) ByteSlice() []byte { - if v.b != nil { - return cloneBytes(v.b) - } - return []byte(v.s) -} - -// String returns the data as a string, making a copy if necessary. -func (v ByteView) String() string { - if v.b != nil { - return string(v.b) - } - return v.s -} - -// At returns the byte at index i. -func (v ByteView) At(i int) byte { - if v.b != nil { - return v.b[i] - } - return v.s[i] -} - -// Slice slices the view between the provided from and to indices. -func (v ByteView) Slice(from, to int) ByteView { - if v.b != nil { - return ByteView{b: v.b[from:to]} - } - return ByteView{s: v.s[from:to]} -} - -// SliceFrom slices the view from the provided index until the end. -func (v ByteView) SliceFrom(from int) ByteView { - if v.b != nil { - return ByteView{b: v.b[from:]} - } - return ByteView{s: v.s[from:]} -} - -// Copy copies b into dest and returns the number of bytes copied. -func (v ByteView) Copy(dest []byte) int { - if v.b != nil { - return copy(dest, v.b) - } - return copy(dest, v.s) -} - -// Equal returns whether the bytes in b are the same as the bytes in -// b2. -func (v ByteView) Equal(b2 ByteView) bool { - if b2.b == nil { - return v.EqualString(b2.s) - } - return v.EqualBytes(b2.b) -} - -// EqualString returns whether the bytes in b are the same as the bytes -// in s. 
-func (v ByteView) EqualString(s string) bool { - if v.b == nil { - return v.s == s - } - l := v.Len() - if len(s) != l { - return false - } - for i, bi := range v.b { - if bi != s[i] { - return false - } - } - return true -} - -// EqualBytes returns whether the bytes in b are the same as the bytes -// in b2. -func (v ByteView) EqualBytes(b2 []byte) bool { - if v.b != nil { - return bytes.Equal(v.b, b2) - } - l := v.Len() - if len(b2) != l { - return false - } - for i, bi := range b2 { - if bi != v.s[i] { - return false - } - } - return true -} - -// Reader returns an io.ReadSeeker for the bytes in v. -func (v ByteView) Reader() io.ReadSeeker { - if v.b != nil { - return bytes.NewReader(v.b) - } - return strings.NewReader(v.s) -} - -// ReadAt implements io.ReaderAt on the bytes in v. -func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) { - if off < 0 { - return 0, errors.New("view: invalid offset") - } - if off >= int64(v.Len()) { - return 0, io.EOF - } - n = v.SliceFrom(int(off)).Copy(p) - if n < len(p) { - err = io.EOF - } - return -} - -// WriteTo implements io.WriterTo on the bytes in v. -func (v ByteView) WriteTo(w io.Writer) (n int64, err error) { - var m int - if v.b != nil { - m, err = w.Write(v.b) - } else { - m, err = io.WriteString(w, v.s) - } - if err == nil && m < v.Len() { - err = io.ErrShortWrite - } - n = int64(m) - return -} diff --git a/vendor/github.com/golang/groupcache/byteview_test.go b/vendor/github.com/golang/groupcache/byteview_test.go deleted file mode 100644 index a09757aa82..0000000000 --- a/vendor/github.com/golang/groupcache/byteview_test.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2012 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package groupcache - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "testing" -) - -func TestByteView(t *testing.T) { - for _, s := range []string{"", "x", "yy"} { - for _, v := range []ByteView{of([]byte(s)), of(s)} { - name := fmt.Sprintf("string %q, view %+v", s, v) - if v.Len() != len(s) { - t.Errorf("%s: Len = %d; want %d", name, v.Len(), len(s)) - } - if v.String() != s { - t.Errorf("%s: String = %q; want %q", name, v.String(), s) - } - var longDest [3]byte - if n := v.Copy(longDest[:]); n != len(s) { - t.Errorf("%s: long Copy = %d; want %d", name, n, len(s)) - } - var shortDest [1]byte - if n := v.Copy(shortDest[:]); n != min(len(s), 1) { - t.Errorf("%s: short Copy = %d; want %d", name, n, min(len(s), 1)) - } - if got, err := ioutil.ReadAll(v.Reader()); err != nil || string(got) != s { - t.Errorf("%s: Reader = %q, %v; want %q", name, got, err, s) - } - if got, err := ioutil.ReadAll(io.NewSectionReader(v, 0, int64(len(s)))); err != nil || string(got) != s { - t.Errorf("%s: SectionReader of ReaderAt = %q, %v; want %q", name, got, err, s) - } - var dest bytes.Buffer - if _, err := v.WriteTo(&dest); err != nil || !bytes.Equal(dest.Bytes(), []byte(s)) { - t.Errorf("%s: WriteTo = %q, %v; want %q", name, dest.Bytes(), err, s) - } - } - } -} - -// of returns a byte view of the []byte or string in x. -func of(x interface{}) ByteView { - if bytes, ok := x.([]byte); ok { - return ByteView{b: bytes} - } - return ByteView{s: x.(string)} -} - -func TestByteViewEqual(t *testing.T) { - tests := []struct { - a interface{} // string or []byte - b interface{} // string or []byte - want bool - }{ - {"x", "x", true}, - {"x", "y", false}, - {"x", "yy", false}, - {[]byte("x"), []byte("x"), true}, - {[]byte("x"), []byte("y"), false}, - {[]byte("x"), []byte("yy"), false}, - {[]byte("x"), "x", true}, - {[]byte("x"), "y", false}, - {[]byte("x"), "yy", false}, - {"x", []byte("x"), true}, - {"x", []byte("y"), false}, - {"x", []byte("yy"), false}, - } - for i, tt := range tests { - va := of(tt.a) - if bytes, ok := tt.b.([]byte); ok { - if got := va.EqualBytes(bytes); got != tt.want { - t.Errorf("%d. EqualBytes = %v; want %v", i, got, tt.want) - } - } else { - if got := va.EqualString(tt.b.(string)); got != tt.want { - t.Errorf("%d. EqualString = %v; want %v", i, got, tt.want) - } - } - if got := va.Equal(of(tt.b)); got != tt.want { - t.Errorf("%d. Equal = %v; want %v", i, got, tt.want) - } - } -} - -func TestByteViewSlice(t *testing.T) { - tests := []struct { - in string - from int - to interface{} // nil to mean the end (SliceFrom); else int - want string - }{ - { - in: "abc", - from: 1, - to: 2, - want: "b", - }, - { - in: "abc", - from: 1, - want: "bc", - }, - { - in: "abc", - to: 2, - want: "ab", - }, - } - for i, tt := range tests { - for _, v := range []ByteView{of([]byte(tt.in)), of(tt.in)} { - name := fmt.Sprintf("test %d, view %+v", i, v) - if tt.to != nil { - v = v.Slice(tt.from, tt.to.(int)) - } else { - v = v.SliceFrom(tt.from) - } - if v.String() != tt.want { - t.Errorf("%s: got %q; want %q", name, v.String(), tt.want) - } - } - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/golang/groupcache/groupcache.go b/vendor/github.com/golang/groupcache/groupcache.go deleted file mode 100644 index 316ca49409..0000000000 --- a/vendor/github.com/golang/groupcache/groupcache.go +++ /dev/null @@ -1,491 +0,0 @@ -/* -Copyright 2012 Google Inc. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package groupcache provides a data loading mechanism with caching -// and de-duplication that works across a set of peer processes. -// -// Each data Get first consults its local cache, otherwise delegates -// to the requested key's canonical owner, which then checks its cache -// or finally gets the data. In the common case, many concurrent -// cache misses across a set of peers for the same key result in just -// one cache fill. -package groupcache - -import ( - "errors" - "math/rand" - "strconv" - "sync" - "sync/atomic" - - pb "github.com/golang/groupcache/groupcachepb" - "github.com/golang/groupcache/lru" - "github.com/golang/groupcache/singleflight" -) - -// A Getter loads data for a key. -type Getter interface { - // Get returns the value identified by key, populating dest. - // - // The returned data must be unversioned. That is, key must - // uniquely describe the loaded data, without an implicit - // current time, and without relying on cache expiration - // mechanisms. - Get(ctx Context, key string, dest Sink) error -} - -// A GetterFunc implements Getter with a function. -type GetterFunc func(ctx Context, key string, dest Sink) error - -func (f GetterFunc) Get(ctx Context, key string, dest Sink) error { - return f(ctx, key, dest) -} - -var ( - mu sync.RWMutex - groups = make(map[string]*Group) - - initPeerServerOnce sync.Once - initPeerServer func() -) - -// GetGroup returns the named group previously created with NewGroup, or -// nil if there's no such group. -func GetGroup(name string) *Group { - mu.RLock() - g := groups[name] - mu.RUnlock() - return g -} - -// NewGroup creates a coordinated group-aware Getter from a Getter. -// -// The returned Getter tries (but does not guarantee) to run only one -// Get call at once for a given key across an entire set of peer -// processes. Concurrent callers both in the local process and in -// other processes receive copies of the answer once the original Get -// completes. -// -// The group name must be unique for each getter. -func NewGroup(name string, cacheBytes int64, getter Getter) *Group { - return newGroup(name, cacheBytes, getter, nil) -} - -// If peers is nil, the peerPicker is called via a sync.Once to initialize it. -func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group { - if getter == nil { - panic("nil Getter") - } - mu.Lock() - defer mu.Unlock() - initPeerServerOnce.Do(callInitPeerServer) - if _, dup := groups[name]; dup { - panic("duplicate registration of group " + name) - } - g := &Group{ - name: name, - getter: getter, - peers: peers, - cacheBytes: cacheBytes, - loadGroup: &singleflight.Group{}, - } - if fn := newGroupHook; fn != nil { - fn(g) - } - groups[name] = g - return g -} - -// newGroupHook, if non-nil, is called right after a new group is created. -var newGroupHook func(*Group) - -// RegisterNewGroupHook registers a hook that is run each time -// a group is created. 
-func RegisterNewGroupHook(fn func(*Group)) { - if newGroupHook != nil { - panic("RegisterNewGroupHook called more than once") - } - newGroupHook = fn -} - -// RegisterServerStart registers a hook that is run when the first -// group is created. -func RegisterServerStart(fn func()) { - if initPeerServer != nil { - panic("RegisterServerStart called more than once") - } - initPeerServer = fn -} - -func callInitPeerServer() { - if initPeerServer != nil { - initPeerServer() - } -} - -// A Group is a cache namespace and associated data loaded spread over -// a group of 1 or more machines. -type Group struct { - name string - getter Getter - peersOnce sync.Once - peers PeerPicker - cacheBytes int64 // limit for sum of mainCache and hotCache size - - // mainCache is a cache of the keys for which this process - // (amongst its peers) is authoritative. That is, this cache - // contains keys which consistent hash on to this process's - // peer number. - mainCache cache - - // hotCache contains keys/values for which this peer is not - // authoritative (otherwise they would be in mainCache), but - // are popular enough to warrant mirroring in this process to - // avoid going over the network to fetch from a peer. Having - // a hotCache avoids network hotspotting, where a peer's - // network card could become the bottleneck on a popular key. - // This cache is used sparingly to maximize the total number - // of key/value pairs that can be stored globally. - hotCache cache - - // loadGroup ensures that each key is only fetched once - // (either locally or remotely), regardless of the number of - // concurrent callers. - loadGroup flightGroup - - _ int32 // force Stats to be 8-byte aligned on 32-bit platforms - - // Stats are statistics on the group. - Stats Stats -} - -// flightGroup is defined as an interface which flightgroup.Group -// satisfies. We define this so that we may test with an alternate -// implementation. -type flightGroup interface { - // Done is called when Do is done. - Do(key string, fn func() (interface{}, error)) (interface{}, error) -} - -// Stats are per-group statistics. -type Stats struct { - Gets AtomicInt // any Get request, including from peers - CacheHits AtomicInt // either cache was good - PeerLoads AtomicInt // either remote load or remote cache hit (not an error) - PeerErrors AtomicInt - Loads AtomicInt // (gets - cacheHits) - LoadsDeduped AtomicInt // after singleflight - LocalLoads AtomicInt // total good local loads - LocalLoadErrs AtomicInt // total bad local loads - ServerRequests AtomicInt // gets that came over the network from peers -} - -// Name returns the name of the group. -func (g *Group) Name() string { - return g.name -} - -func (g *Group) initPeers() { - if g.peers == nil { - g.peers = getPeers(g.name) - } -} - -func (g *Group) Get(ctx Context, key string, dest Sink) error { - g.peersOnce.Do(g.initPeers) - g.Stats.Gets.Add(1) - if dest == nil { - return errors.New("groupcache: nil dest Sink") - } - value, cacheHit := g.lookupCache(key) - - if cacheHit { - g.Stats.CacheHits.Add(1) - return setSinkView(dest, value) - } - - // Optimization to avoid double unmarshalling or copying: keep - // track of whether the dest was already populated. One caller - // (if local) will set this; the losers will not. The common - // case will likely be one caller. 
- destPopulated := false - value, destPopulated, err := g.load(ctx, key, dest) - if err != nil { - return err - } - if destPopulated { - return nil - } - return setSinkView(dest, value) -} - -// load loads key either by invoking the getter locally or by sending it to another machine. -func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) { - g.Stats.Loads.Add(1) - viewi, err := g.loadGroup.Do(key, func() (interface{}, error) { - // Check the cache again because singleflight can only dedup calls - // that overlap concurrently. It's possible for 2 concurrent - // requests to miss the cache, resulting in 2 load() calls. An - // unfortunate goroutine scheduling would result in this callback - // being run twice, serially. If we don't check the cache again, - // cache.nbytes would be incremented below even though there will - // be only one entry for this key. - // - // Consider the following serialized event ordering for two - // goroutines in which this callback gets called twice for the - // same key: - // 1: Get("key") - // 2: Get("key") - // 1: lookupCache("key") - // 2: lookupCache("key") - // 1: load("key") - // 2: load("key") - // 1: loadGroup.Do("key", fn) - // 1: fn() - // 2: loadGroup.Do("key", fn) - // 2: fn() - if value, cacheHit := g.lookupCache(key); cacheHit { - g.Stats.CacheHits.Add(1) - return value, nil - } - g.Stats.LoadsDeduped.Add(1) - var value ByteView - var err error - if peer, ok := g.peers.PickPeer(key); ok { - value, err = g.getFromPeer(ctx, peer, key) - if err == nil { - g.Stats.PeerLoads.Add(1) - return value, nil - } - g.Stats.PeerErrors.Add(1) - // TODO(bradfitz): log the peer's error? keep - // log of the past few for /groupcachez? It's - // probably boring (normal task movement), so not - // worth logging I imagine. - } - value, err = g.getLocally(ctx, key, dest) - if err != nil { - g.Stats.LocalLoadErrs.Add(1) - return nil, err - } - g.Stats.LocalLoads.Add(1) - destPopulated = true // only one caller of load gets this return value - g.populateCache(key, value, &g.mainCache) - return value, nil - }) - if err == nil { - value = viewi.(ByteView) - } - return -} - -func (g *Group) getLocally(ctx Context, key string, dest Sink) (ByteView, error) { - err := g.getter.Get(ctx, key, dest) - if err != nil { - return ByteView{}, err - } - return dest.view() -} - -func (g *Group) getFromPeer(ctx Context, peer ProtoGetter, key string) (ByteView, error) { - req := &pb.GetRequest{ - Group: &g.name, - Key: &key, - } - res := &pb.GetResponse{} - err := peer.Get(ctx, req, res) - if err != nil { - return ByteView{}, err - } - value := ByteView{b: res.Value} - // TODO(bradfitz): use res.MinuteQps or something smart to - // conditionally populate hotCache. For now just do it some - // percentage of the time. - if rand.Intn(10) == 0 { - g.populateCache(key, value, &g.hotCache) - } - return value, nil -} - -func (g *Group) lookupCache(key string) (value ByteView, ok bool) { - if g.cacheBytes <= 0 { - return - } - value, ok = g.mainCache.get(key) - if ok { - return - } - value, ok = g.hotCache.get(key) - return -} - -func (g *Group) populateCache(key string, value ByteView, cache *cache) { - if g.cacheBytes <= 0 { - return - } - cache.add(key, value) - - // Evict items from cache(s) if necessary. - for { - mainBytes := g.mainCache.bytes() - hotBytes := g.hotCache.bytes() - if mainBytes+hotBytes <= g.cacheBytes { - return - } - - // TODO(bradfitz): this is good-enough-for-now logic.
- // It should be something based on measurements and/or - // respecting the costs of different resources. - victim := &g.mainCache - if hotBytes > mainBytes/8 { - victim = &g.hotCache - } - victim.removeOldest() - } -} - -// CacheType represents a type of cache. -type CacheType int - -const ( - // The MainCache is the cache for items that this peer is the - // owner for. - MainCache CacheType = iota + 1 - - // The HotCache is the cache for items that seem popular - // enough to replicate to this node, even though it's not the - // owner. - HotCache -) - -// CacheStats returns stats about the provided cache within the group. -func (g *Group) CacheStats(which CacheType) CacheStats { - switch which { - case MainCache: - return g.mainCache.stats() - case HotCache: - return g.hotCache.stats() - default: - return CacheStats{} - } -} - -// cache is a wrapper around an *lru.Cache that adds synchronization, -// makes values always be ByteView, and counts the size of all keys and -// values. -type cache struct { - mu sync.RWMutex - nbytes int64 // of all keys and values - lru *lru.Cache - nhit, nget int64 - nevict int64 // number of evictions -} - -func (c *cache) stats() CacheStats { - c.mu.RLock() - defer c.mu.RUnlock() - return CacheStats{ - Bytes: c.nbytes, - Items: c.itemsLocked(), - Gets: c.nget, - Hits: c.nhit, - Evictions: c.nevict, - } -} - -func (c *cache) add(key string, value ByteView) { - c.mu.Lock() - defer c.mu.Unlock() - if c.lru == nil { - c.lru = &lru.Cache{ - OnEvicted: func(key lru.Key, value interface{}) { - val := value.(ByteView) - c.nbytes -= int64(len(key.(string))) + int64(val.Len()) - c.nevict++ - }, - } - } - c.lru.Add(key, value) - c.nbytes += int64(len(key)) + int64(value.Len()) -} - -func (c *cache) get(key string) (value ByteView, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - c.nget++ - if c.lru == nil { - return - } - vi, ok := c.lru.Get(key) - if !ok { - return - } - c.nhit++ - return vi.(ByteView), true -} - -func (c *cache) removeOldest() { - c.mu.Lock() - defer c.mu.Unlock() - if c.lru != nil { - c.lru.RemoveOldest() - } -} - -func (c *cache) bytes() int64 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.nbytes -} - -func (c *cache) items() int64 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.itemsLocked() -} - -func (c *cache) itemsLocked() int64 { - if c.lru == nil { - return 0 - } - return int64(c.lru.Len()) -} - -// An AtomicInt is an int64 to be accessed atomically. -type AtomicInt int64 - -// Add atomically adds n to i. -func (i *AtomicInt) Add(n int64) { - atomic.AddInt64((*int64)(i), n) -} - -// Get atomically gets the value of i. -func (i *AtomicInt) Get() int64 { - return atomic.LoadInt64((*int64)(i)) -} - -func (i *AtomicInt) String() string { - return strconv.FormatInt(i.Get(), 10) -} - -// CacheStats are returned by stats accessors on Group. -type CacheStats struct { - Bytes int64 - Items int64 - Gets int64 - Hits int64 - Evictions int64 -} diff --git a/vendor/github.com/golang/groupcache/groupcache_test.go b/vendor/github.com/golang/groupcache/groupcache_test.go deleted file mode 100644 index ea05cac4aa..0000000000 --- a/vendor/github.com/golang/groupcache/groupcache_test.go +++ /dev/null @@ -1,456 +0,0 @@ -/* -Copyright 2012 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Tests for groupcache. - -package groupcache - -import ( - "errors" - "fmt" - "hash/crc32" - "math/rand" - "reflect" - "sync" - "testing" - "time" - "unsafe" - - "github.com/golang/protobuf/proto" - - pb "github.com/golang/groupcache/groupcachepb" - testpb "github.com/golang/groupcache/testpb" -) - -var ( - once sync.Once - stringGroup, protoGroup Getter - - stringc = make(chan string) - - dummyCtx Context - - // cacheFills is the number of times stringGroup or - // protoGroup's Getter have been called. Read using the - // cacheFills function. - cacheFills AtomicInt -) - -const ( - stringGroupName = "string-group" - protoGroupName = "proto-group" - testMessageType = "google3/net/groupcache/go/test_proto.TestMessage" - fromChan = "from-chan" - cacheSize = 1 << 20 -) - -func testSetup() { - stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error { - if key == fromChan { - key = <-stringc - } - cacheFills.Add(1) - return dest.SetString("ECHO:" + key) - })) - - protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error { - if key == fromChan { - key = <-stringc - } - cacheFills.Add(1) - return dest.SetProto(&testpb.TestMessage{ - Name: proto.String("ECHO:" + key), - City: proto.String("SOME-CITY"), - }) - })) -} - -// tests that a Getter's Get method is only called once with two -// outstanding callers. This is the string variant. -func TestGetDupSuppressString(t *testing.T) { - once.Do(testSetup) - // Start two getters. The first should block (waiting reading - // from stringc) and the second should latch on to the first - // one. - resc := make(chan string, 2) - for i := 0; i < 2; i++ { - go func() { - var s string - if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil { - resc <- "ERROR:" + err.Error() - return - } - resc <- s - }() - } - - // Wait a bit so both goroutines get merged together via - // singleflight. - // TODO(bradfitz): decide whether there are any non-offensive - // debug/test hooks that could be added to singleflight to - // make a sleep here unnecessary. - time.Sleep(250 * time.Millisecond) - - // Unblock the first getter, which should unblock the second - // as well. - stringc <- "foo" - - for i := 0; i < 2; i++ { - select { - case v := <-resc: - if v != "ECHO:foo" { - t.Errorf("got %q; want %q", v, "ECHO:foo") - } - case <-time.After(5 * time.Second): - t.Errorf("timeout waiting on getter #%d of 2", i+1) - } - } -} - -// tests that a Getter's Get method is only called once with two -// outstanding callers. This is the proto variant. -func TestGetDupSuppressProto(t *testing.T) { - once.Do(testSetup) - // Start two getters. The first should block (waiting reading - // from stringc) and the second should latch on to the first - // one. 
- resc := make(chan *testpb.TestMessage, 2) - for i := 0; i < 2; i++ { - go func() { - tm := new(testpb.TestMessage) - if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil { - tm.Name = proto.String("ERROR:" + err.Error()) - } - resc <- tm - }() - } - - // Wait a bit so both goroutines get merged together via - // singleflight. - // TODO(bradfitz): decide whether there are any non-offensive - // debug/test hooks that could be added to singleflight to - // make a sleep here unnecessary. - time.Sleep(250 * time.Millisecond) - - // Unblock the first getter, which should unblock the second - // as well. - stringc <- "Fluffy" - want := &testpb.TestMessage{ - Name: proto.String("ECHO:Fluffy"), - City: proto.String("SOME-CITY"), - } - for i := 0; i < 2; i++ { - select { - case v := <-resc: - if !reflect.DeepEqual(v, want) { - t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want)) - } - case <-time.After(5 * time.Second): - t.Errorf("timeout waiting on getter #%d of 2", i+1) - } - } -} - -func countFills(f func()) int64 { - fills0 := cacheFills.Get() - f() - return cacheFills.Get() - fills0 -} - -func TestCaching(t *testing.T) { - once.Do(testSetup) - fills := countFills(func() { - for i := 0; i < 10; i++ { - var s string - if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil { - t.Fatal(err) - } - } - }) - if fills != 1 { - t.Errorf("expected 1 cache fill; got %d", fills) - } -} - -func TestCacheEviction(t *testing.T) { - once.Do(testSetup) - testKey := "TestCacheEviction-key" - getTestKey := func() { - var res string - for i := 0; i < 10; i++ { - if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil { - t.Fatal(err) - } - } - } - fills := countFills(getTestKey) - if fills != 1 { - t.Fatalf("expected 1 cache fill; got %d", fills) - } - - g := stringGroup.(*Group) - evict0 := g.mainCache.nevict - - // Trash the cache with other keys. - var bytesFlooded int64 - // cacheSize/len(testKey) is approximate - for bytesFlooded < cacheSize+1024 { - var res string - key := fmt.Sprintf("dummy-key-%d", bytesFlooded) - stringGroup.Get(dummyCtx, key, StringSink(&res)) - bytesFlooded += int64(len(key) + len(res)) - } - evicts := g.mainCache.nevict - evict0 - if evicts <= 0 { - t.Errorf("evicts = %v; want more than 0", evicts) - } - - // Test that the key is gone. - fills = countFills(getTestKey) - if fills != 1 { - t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills) - } -} - -type fakePeer struct { - hits int - fail bool -} - -func (p *fakePeer) Get(_ Context, in *pb.GetRequest, out *pb.GetResponse) error { - p.hits++ - if p.fail { - return errors.New("simulated error from peer") - } - out.Value = []byte("got:" + in.GetKey()) - return nil -} - -type fakePeers []ProtoGetter - -func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) { - if len(p) == 0 { - return - } - n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p)) - return p[n], p[n] != nil -} - -// tests that peers (virtual, in-process) are hit, and how much. 
-func TestPeers(t *testing.T) { - once.Do(testSetup) - rand.Seed(123) - peer0 := &fakePeer{} - peer1 := &fakePeer{} - peer2 := &fakePeer{} - peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil}) - const cacheSize = 0 // disabled - localHits := 0 - getter := func(_ Context, key string, dest Sink) error { - localHits++ - return dest.SetString("got:" + key) - } - testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList) - run := func(name string, n int, wantSummary string) { - // Reset counters - localHits = 0 - for _, p := range []*fakePeer{peer0, peer1, peer2} { - p.hits = 0 - } - - for i := 0; i < n; i++ { - key := fmt.Sprintf("key-%d", i) - want := "got:" + key - var got string - err := testGroup.Get(dummyCtx, key, StringSink(&got)) - if err != nil { - t.Errorf("%s: error on key %q: %v", name, key, err) - continue - } - if got != want { - t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want) - } - } - summary := func() string { - return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits) - } - if got := summary(); got != wantSummary { - t.Errorf("%s: got %q; want %q", name, got, wantSummary) - } - } - resetCacheSize := func(maxBytes int64) { - g := testGroup - g.cacheBytes = maxBytes - g.mainCache = cache{} - g.hotCache = cache{} - } - - // Base case; peers all up, with no problems. - resetCacheSize(1 << 20) - run("base", 200, "localHits = 49, peers = 51 49 51") - - // Verify cache was hit. All localHits are gone, and some of - // the peer hits (the ones randomly selected to be maybe hot) - run("cached_base", 200, "localHits = 0, peers = 49 47 48") - resetCacheSize(0) - - // With one of the peers being down. - // TODO(bradfitz): on a peer number being unavailable, the - // consistent hashing should maybe keep trying others to - // spread the load out. Currently it fails back to local - // execution if the first consistent-hash slot is unavailable. - peerList[0] = nil - run("one_peer_down", 200, "localHits = 100, peers = 0 49 51") - - // Failing peer - peerList[0] = peer0 - peer0.fail = true - run("peer0_failing", 200, "localHits = 100, peers = 51 49 51") -} - -func TestTruncatingByteSliceTarget(t *testing.T) { - var buf [100]byte - s := buf[:] - if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil { - t.Fatal(err) - } - if want := "ECHO:short"; string(s) != want { - t.Errorf("short key got %q; want %q", s, want) - } - - s = buf[:6] - if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil { - t.Fatal(err) - } - if want := "ECHO:t"; string(s) != want { - t.Errorf("truncated key got %q; want %q", s, want) - } -} - -func TestAllocatingByteSliceTarget(t *testing.T) { - var dst []byte - sink := AllocatingByteSliceSink(&dst) - - inBytes := []byte("some bytes") - sink.SetBytes(inBytes) - if want := "some bytes"; string(dst) != want { - t.Errorf("SetBytes resulted in %q; want %q", dst, want) - } - v, err := sink.view() - if err != nil { - t.Fatalf("view after SetBytes failed: %v", err) - } - if &inBytes[0] == &dst[0] { - t.Error("inBytes and dst share memory") - } - if &inBytes[0] == &v.b[0] { - t.Error("inBytes and view share memory") - } - if &dst[0] == &v.b[0] { - t.Error("dst and view share memory") - } -} - -// orderedFlightGroup allows the caller to force the schedule of when -// orig.Do will be called. This is useful to serialize calls such -// that singleflight cannot dedup them. 
-type orderedFlightGroup struct { - mu sync.Mutex - stage1 chan bool - stage2 chan bool - orig flightGroup -} - -func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) { - <-g.stage1 - <-g.stage2 - g.mu.Lock() - defer g.mu.Unlock() - return g.orig.Do(key, fn) -} - -// TestNoDedup tests invariants on the cache size when singleflight is -// unable to dedup calls. -func TestNoDedup(t *testing.T) { - const testkey = "testkey" - const testval = "testval" - g := newGroup("testgroup", 1024, GetterFunc(func(_ Context, key string, dest Sink) error { - return dest.SetString(testval) - }), nil) - - orderedGroup := &orderedFlightGroup{ - stage1: make(chan bool), - stage2: make(chan bool), - orig: g.loadGroup, - } - // Replace loadGroup with our wrapper so we can control when - // loadGroup.Do is entered for each concurrent request. - g.loadGroup = orderedGroup - - // Issue two identical requests concurrently. Since the cache is - // empty, it will miss. Both will enter load(), but we will only - // allow one at a time to enter singleflight.Do, so the callback - // function will be called twice. - resc := make(chan string, 2) - for i := 0; i < 2; i++ { - go func() { - var s string - if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil { - resc <- "ERROR:" + err.Error() - return - } - resc <- s - }() - } - - // Ensure both goroutines have entered the Do routine. This implies - // both concurrent requests have checked the cache, found it empty, - // and called load(). - orderedGroup.stage1 <- true - orderedGroup.stage1 <- true - orderedGroup.stage2 <- true - orderedGroup.stage2 <- true - - for i := 0; i < 2; i++ { - if s := <-resc; s != testval { - t.Errorf("result is %s want %s", s, testval) - } - } - - const wantItems = 1 - if g.mainCache.items() != wantItems { - t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems) - } - - // If the singleflight callback doesn't double-check the cache again - // upon entry, we would increment nbytes twice but the entry would - // only be in the cache once. - const wantBytes = int64(len(testkey) + len(testval)) - if g.mainCache.nbytes != wantBytes { - t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes) - } -} - -func TestGroupStatsAlignment(t *testing.T) { - var g Group - off := unsafe.Offsetof(g.Stats) - if off%8 != 0 { - t.Fatal("Stats structure is not 8-byte aligned.") - } -} - -// TODO(bradfitz): port the Google-internal full integration test into here, -// using HTTP requests instead of our RPC system. diff --git a/vendor/github.com/golang/groupcache/http.go b/vendor/github.com/golang/groupcache/http.go deleted file mode 100644 index f37467a788..0000000000 --- a/vendor/github.com/golang/groupcache/http.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package groupcache - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/golang/groupcache/consistenthash" - pb "github.com/golang/groupcache/groupcachepb" - "github.com/golang/protobuf/proto" -) - -const defaultBasePath = "/_groupcache/" - -const defaultReplicas = 50 - -// HTTPPool implements PeerPicker for a pool of HTTP peers. -type HTTPPool struct { - // Context optionally specifies a context for the server to use when it - // receives a request. - // If nil, the server uses a nil Context. - Context func(*http.Request) Context - - // Transport optionally specifies an http.RoundTripper for the client - // to use when it makes a request. - // If nil, the client uses http.DefaultTransport. - Transport func(Context) http.RoundTripper - - // this peer's base URL, e.g. "https://example.net:8000" - self string - - // opts specifies the options. - opts HTTPPoolOptions - - mu sync.Mutex // guards peers and httpGetters - peers *consistenthash.Map - httpGetters map[string]*httpGetter // keyed by e.g. "http://10.0.0.2:8008" -} - -// HTTPPoolOptions are the configurations of a HTTPPool. -type HTTPPoolOptions struct { - // BasePath specifies the HTTP path that will serve groupcache requests. - // If blank, it defaults to "/_groupcache/". - BasePath string - - // Replicas specifies the number of key replicas on the consistent hash. - // If blank, it defaults to 50. - Replicas int - - // HashFn specifies the hash function of the consistent hash. - // If blank, it defaults to crc32.ChecksumIEEE. - HashFn consistenthash.Hash -} - -// NewHTTPPool initializes an HTTP pool of peers, and registers itself as a PeerPicker. -// For convenience, it also registers itself as an http.Handler with http.DefaultServeMux. -// The self argument should be a valid base URL that points to the current server, -// for example "http://example.net:8000". -func NewHTTPPool(self string) *HTTPPool { - p := NewHTTPPoolOpts(self, nil) - http.Handle(p.opts.BasePath, p) - return p -} - -var httpPoolMade bool - -// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options. -// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler. -// The returned *HTTPPool implements http.Handler and must be registered using http.Handle. -func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool { - if httpPoolMade { - panic("groupcache: NewHTTPPool must be called only once") - } - httpPoolMade = true - - p := &HTTPPool{ - self: self, - httpGetters: make(map[string]*httpGetter), - } - if o != nil { - p.opts = *o - } - if p.opts.BasePath == "" { - p.opts.BasePath = defaultBasePath - } - if p.opts.Replicas == 0 { - p.opts.Replicas = defaultReplicas - } - p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn) - - RegisterPeerPicker(func() PeerPicker { return p }) - return p -} - -// Set updates the pool's list of peers. -// Each peer value should be a valid base URL, -// for example "http://example.net:8000". -func (p *HTTPPool) Set(peers ...string) { - p.mu.Lock() - defer p.mu.Unlock() - p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn) - p.peers.Add(peers...) 
- p.httpGetters = make(map[string]*httpGetter, len(peers)) - for _, peer := range peers { - p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.opts.BasePath} - } -} - -func (p *HTTPPool) PickPeer(key string) (ProtoGetter, bool) { - p.mu.Lock() - defer p.mu.Unlock() - if p.peers.IsEmpty() { - return nil, false - } - if peer := p.peers.Get(key); peer != p.self { - return p.httpGetters[peer], true - } - return nil, false -} - -func (p *HTTPPool) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Parse request. - if !strings.HasPrefix(r.URL.Path, p.opts.BasePath) { - panic("HTTPPool serving unexpected path: " + r.URL.Path) - } - parts := strings.SplitN(r.URL.Path[len(p.opts.BasePath):], "/", 2) - if len(parts) != 2 { - http.Error(w, "bad request", http.StatusBadRequest) - return - } - groupName := parts[0] - key := parts[1] - - // Fetch the value for this group/key. - group := GetGroup(groupName) - if group == nil { - http.Error(w, "no such group: "+groupName, http.StatusNotFound) - return - } - var ctx Context - if p.Context != nil { - ctx = p.Context(r) - } - - group.Stats.ServerRequests.Add(1) - var value []byte - err := group.Get(ctx, key, AllocatingByteSliceSink(&value)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Write the value to the response body as a proto message. - body, err := proto.Marshal(&pb.GetResponse{Value: value}) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/x-protobuf") - w.Write(body) -} - -type httpGetter struct { - transport func(Context) http.RoundTripper - baseURL string -} - -var bufferPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -func (h *httpGetter) Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error { - u := fmt.Sprintf( - "%v%v/%v", - h.baseURL, - url.QueryEscape(in.GetGroup()), - url.QueryEscape(in.GetKey()), - ) - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return err - } - tr := http.DefaultTransport - if h.transport != nil { - tr = h.transport(context) - } - res, err := tr.RoundTrip(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return fmt.Errorf("server returned: %v", res.Status) - } - b := bufferPool.Get().(*bytes.Buffer) - b.Reset() - defer bufferPool.Put(b) - _, err = io.Copy(b, res.Body) - if err != nil { - return fmt.Errorf("reading response body: %v", err) - } - err = proto.Unmarshal(b.Bytes(), out) - if err != nil { - return fmt.Errorf("decoding response body: %v", err) - } - return nil -} diff --git a/vendor/github.com/golang/groupcache/http_test.go b/vendor/github.com/golang/groupcache/http_test.go deleted file mode 100644 index b42edd7f09..0000000000 --- a/vendor/github.com/golang/groupcache/http_test.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package groupcache - -import ( - "errors" - "flag" - "log" - "net" - "net/http" - "os" - "os/exec" - "strconv" - "strings" - "sync" - "testing" - "time" -) - -var ( - peerAddrs = flag.String("test_peer_addrs", "", "Comma-separated list of peer addresses; used by TestHTTPPool") - peerIndex = flag.Int("test_peer_index", -1, "Index of which peer this child is; used by TestHTTPPool") - peerChild = flag.Bool("test_peer_child", false, "True if running as a child process; used by TestHTTPPool") -) - -func TestHTTPPool(t *testing.T) { - if *peerChild { - beChildForTestHTTPPool() - os.Exit(0) - } - - const ( - nChild = 4 - nGets = 100 - ) - - var childAddr []string - for i := 0; i < nChild; i++ { - childAddr = append(childAddr, pickFreeAddr(t)) - } - - var cmds []*exec.Cmd - var wg sync.WaitGroup - for i := 0; i < nChild; i++ { - cmd := exec.Command(os.Args[0], - "--test.run=TestHTTPPool", - "--test_peer_child", - "--test_peer_addrs="+strings.Join(childAddr, ","), - "--test_peer_index="+strconv.Itoa(i), - ) - cmds = append(cmds, cmd) - wg.Add(1) - if err := cmd.Start(); err != nil { - t.Fatal("failed to start child process: ", err) - } - go awaitAddrReady(t, childAddr[i], &wg) - } - defer func() { - for i := 0; i < nChild; i++ { - if cmds[i].Process != nil { - cmds[i].Process.Kill() - } - } - }() - wg.Wait() - - // Use a dummy self address so that we don't handle gets in-process. - p := NewHTTPPool("should-be-ignored") - p.Set(addrToURL(childAddr)...) - - // Dummy getter function. Gets should go to children only. - // The only time this process will handle a get is when the - // children can't be contacted for some reason. - getter := GetterFunc(func(ctx Context, key string, dest Sink) error { - return errors.New("parent getter called; something's wrong") - }) - g := NewGroup("httpPoolTest", 1<<20, getter) - - for _, key := range testKeys(nGets) { - var value string - if err := g.Get(nil, key, StringSink(&value)); err != nil { - t.Fatal(err) - } - if suffix := ":" + key; !strings.HasSuffix(value, suffix) { - t.Errorf("Get(%q) = %q, want value ending in %q", key, value, suffix) - } - t.Logf("Get key=%q, value=%q (peer:key)", key, value) - } -} - -func testKeys(n int) (keys []string) { - keys = make([]string, n) - for i := range keys { - keys[i] = strconv.Itoa(i) - } - return -} - -func beChildForTestHTTPPool() { - addrs := strings.Split(*peerAddrs, ",") - - p := NewHTTPPool("http://" + addrs[*peerIndex]) - p.Set(addrToURL(addrs)...) - - getter := GetterFunc(func(ctx Context, key string, dest Sink) error { - dest.SetString(strconv.Itoa(*peerIndex) + ":" + key) - return nil - }) - NewGroup("httpPoolTest", 1<<20, getter) - - log.Fatal(http.ListenAndServe(addrs[*peerIndex], p)) -} - -// This is racy. Another process could swoop in and steal the port between the -// call to this function and the next listen call. Should be okay though. -// The proper way would be to pass the l.File() as ExtraFiles to the child -// process, and then close your copy once the child starts. 
-func pickFreeAddr(t *testing.T) string { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - return l.Addr().String() -} - -func addrToURL(addr []string) []string { - url := make([]string, len(addr)) - for i := range addr { - url[i] = "http://" + addr[i] - } - return url -} - -func awaitAddrReady(t *testing.T, addr string, wg *sync.WaitGroup) { - defer wg.Done() - const max = 1 * time.Second - tries := 0 - for { - tries++ - c, err := net.Dial("tcp", addr) - if err == nil { - c.Close() - return - } - delay := time.Duration(tries) * 25 * time.Millisecond - if delay > max { - delay = max - } - time.Sleep(delay) - } -} diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go index 532cc45e6d..eac1c7664f 100644 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -25,7 +25,7 @@ type Cache struct { // an item is evicted. Zero means no limit. MaxEntries int - // OnEvicted optionally specificies a callback function to be + // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key Key, value interface{}) diff --git a/vendor/github.com/golang/groupcache/lru/lru_test.go b/vendor/github.com/golang/groupcache/lru/lru_test.go deleted file mode 100644 index b7d9d8acf4..0000000000 --- a/vendor/github.com/golang/groupcache/lru/lru_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lru - -import ( - "fmt" - "testing" -) - -type simpleStruct struct { - int - string -} - -type complexStruct struct { - int - simpleStruct -} - -var getTests = []struct { - name string - keyToAdd interface{} - keyToGet interface{} - expectedOk bool -}{ - {"string_hit", "myKey", "myKey", true}, - {"string_miss", "myKey", "nonsense", false}, - {"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true}, - {"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false}, - {"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}}, - complexStruct{1, simpleStruct{2, "three"}}, true}, -} - -func TestGet(t *testing.T) { - for _, tt := range getTests { - lru := New(0) - lru.Add(tt.keyToAdd, 1234) - val, ok := lru.Get(tt.keyToGet) - if ok != tt.expectedOk { - t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok) - } else if ok && val != 1234 { - t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val) - } - } -} - -func TestRemove(t *testing.T) { - lru := New(0) - lru.Add("myKey", 1234) - if val, ok := lru.Get("myKey"); !ok { - t.Fatal("TestRemove returned no match") - } else if val != 1234 { - t.Fatalf("TestRemove failed. 
Expected %d, got %v", 1234, val) - } - - lru.Remove("myKey") - if _, ok := lru.Get("myKey"); ok { - t.Fatal("TestRemove returned a removed entry") - } -} - -func TestEvict(t *testing.T) { - evictedKeys := make([]Key, 0) - onEvictedFun := func(key Key, value interface{}) { - evictedKeys = append(evictedKeys, key) - } - - lru := New(20) - lru.OnEvicted = onEvictedFun - for i := 0; i < 22; i++ { - lru.Add(fmt.Sprintf("myKey%d", i), 1234) - } - - if len(evictedKeys) != 2 { - t.Fatalf("got %d evicted keys; want 2", len(evictedKeys)) - } - if evictedKeys[0] != Key("myKey0") { - t.Fatalf("got %v in first evicted key; want %s", evictedKeys[0], "myKey0") - } - if evictedKeys[1] != Key("myKey1") { - t.Fatalf("got %v in second evicted key; want %s", evictedKeys[1], "myKey1") - } -} diff --git a/vendor/github.com/golang/groupcache/peers.go b/vendor/github.com/golang/groupcache/peers.go deleted file mode 100644 index 1625ff0430..0000000000 --- a/vendor/github.com/golang/groupcache/peers.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2012 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// peers.go defines how processes find and communicate with their peers. - -package groupcache - -import ( - pb "github.com/golang/groupcache/groupcachepb" -) - -// Context is an opaque value passed through calls to the -// ProtoGetter. It may be nil if your ProtoGetter implementation does -// not require a context. -type Context interface{} - -// ProtoGetter is the interface that must be implemented by a peer. -type ProtoGetter interface { - Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error -} - -// PeerPicker is the interface that must be implemented to locate -// the peer that owns a specific key. -type PeerPicker interface { - // PickPeer returns the peer that owns the specific key - // and true to indicate that a remote peer was nominated. - // It returns nil, false if the key owner is the current peer. - PickPeer(key string) (peer ProtoGetter, ok bool) -} - -// NoPeers is an implementation of PeerPicker that never finds a peer. -type NoPeers struct{} - -func (NoPeers) PickPeer(key string) (peer ProtoGetter, ok bool) { return } - -var ( - portPicker func(groupName string) PeerPicker -) - -// RegisterPeerPicker registers the peer initialization function. -// It is called once, when the first group is created. -// Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be -// called exactly once, but not both. -func RegisterPeerPicker(fn func() PeerPicker) { - if portPicker != nil { - panic("RegisterPeerPicker called more than once") - } - portPicker = func(_ string) PeerPicker { return fn() } -} - -// RegisterPerGroupPeerPicker registers the peer initialization function, -// which takes the groupName, to be used in choosing a PeerPicker. -// It is called once, when the first group is created. -// Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be -// called exactly once, but not both. 
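A usage sketch of the registration contract described above, with a hypothetical picker type (the canonical `github.com` import path is used; this vendor tree keys the same package under `github.com`): implement `PeerPicker`, then register it exactly once before the first group is created. `NewHTTPPool` performs its own registration, so an application uses one mechanism or the other, never both.

```go
package main

import "github.com/golang/groupcache"

// localOnly is a hypothetical PeerPicker that never nominates a remote
// peer, so the current process owns every key (equivalent to NoPeers).
type localOnly struct{}

func (localOnly) PickPeer(key string) (groupcache.ProtoGetter, bool) {
	return nil, false // nil, false: the key stays with the current peer
}

func main() {
	// Register once, before the first NewGroup call. A second
	// registration panics, and NewHTTPPool registers its own picker,
	// so pick one mechanism.
	groupcache.RegisterPeerPicker(func() groupcache.PeerPicker { return localOnly{} })
}
```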
-func RegisterPerGroupPeerPicker(fn func(groupName string) PeerPicker) { - if portPicker != nil { - panic("RegisterPeerPicker called more than once") - } - portPicker = fn -} - -func getPeers(groupName string) PeerPicker { - if portPicker == nil { - return NoPeers{} - } - pk := portPicker(groupName) - if pk == nil { - pk = NoPeers{} - } - return pk -} diff --git a/vendor/github.com/golang/groupcache/sinks.go b/vendor/github.com/golang/groupcache/sinks.go deleted file mode 100644 index cb42b41b4d..0000000000 --- a/vendor/github.com/golang/groupcache/sinks.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2012 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package groupcache - -import ( - "errors" - - "github.com/golang/protobuf/proto" -) - -// A Sink receives data from a Get call. -// -// Implementation of Getter must call exactly one of the Set methods -// on success. -type Sink interface { - // SetString sets the value to s. - SetString(s string) error - - // SetBytes sets the value to the contents of v. - // The caller retains ownership of v. - SetBytes(v []byte) error - - // SetProto sets the value to the encoded version of m. - // The caller retains ownership of m. - SetProto(m proto.Message) error - - // view returns a frozen view of the bytes for caching. - view() (ByteView, error) -} - -func cloneBytes(b []byte) []byte { - c := make([]byte, len(b)) - copy(c, b) - return c -} - -func setSinkView(s Sink, v ByteView) error { - // A viewSetter is a Sink that can also receive its value from - // a ByteView. This is a fast path to minimize copies when the - // item was already cached locally in memory (where it's - // cached as a ByteView) - type viewSetter interface { - setView(v ByteView) error - } - if vs, ok := s.(viewSetter); ok { - return vs.setView(v) - } - if v.b != nil { - return s.SetBytes(v.b) - } - return s.SetString(v.s) -} - -// StringSink returns a Sink that populates the provided string pointer. -func StringSink(sp *string) Sink { - return &stringSink{sp: sp} -} - -type stringSink struct { - sp *string - v ByteView - // TODO(bradfitz): track whether any Sets were called. -} - -func (s *stringSink) view() (ByteView, error) { - // TODO(bradfitz): return an error if no Set was called - return s.v, nil -} - -func (s *stringSink) SetString(v string) error { - s.v.b = nil - s.v.s = v - *s.sp = v - return nil -} - -func (s *stringSink) SetBytes(v []byte) error { - return s.SetString(string(v)) -} - -func (s *stringSink) SetProto(m proto.Message) error { - b, err := proto.Marshal(m) - if err != nil { - return err - } - s.v.b = b - *s.sp = string(b) - return nil -} - -// ByteViewSink returns a Sink that populates a ByteView. -func ByteViewSink(dst *ByteView) Sink { - if dst == nil { - panic("nil dst") - } - return &byteViewSink{dst: dst} -} - -type byteViewSink struct { - dst *ByteView - - // if this code ever ends up tracking that at least one set* - // method was called, don't make it an error to call set - // methods multiple times. 
Lorry's payload.go does that, and - // it makes sense. The comment at the top of this file about - // "exactly one of the Set methods" is overly strict. We - // really care about at least once (in a handler), but if - // multiple handlers fail (or multiple functions in a program - // using a Sink), it's okay to re-use the same one. -} - -func (s *byteViewSink) setView(v ByteView) error { - *s.dst = v - return nil -} - -func (s *byteViewSink) view() (ByteView, error) { - return *s.dst, nil -} - -func (s *byteViewSink) SetProto(m proto.Message) error { - b, err := proto.Marshal(m) - if err != nil { - return err - } - *s.dst = ByteView{b: b} - return nil -} - -func (s *byteViewSink) SetBytes(b []byte) error { - *s.dst = ByteView{b: cloneBytes(b)} - return nil -} - -func (s *byteViewSink) SetString(v string) error { - *s.dst = ByteView{s: v} - return nil -} - -// ProtoSink returns a sink that unmarshals binary proto values into m. -func ProtoSink(m proto.Message) Sink { - return &protoSink{ - dst: m, - } -} - -type protoSink struct { - dst proto.Message // authorative value - typ string - - v ByteView // encoded -} - -func (s *protoSink) view() (ByteView, error) { - return s.v, nil -} - -func (s *protoSink) SetBytes(b []byte) error { - err := proto.Unmarshal(b, s.dst) - if err != nil { - return err - } - s.v.b = cloneBytes(b) - s.v.s = "" - return nil -} - -func (s *protoSink) SetString(v string) error { - b := []byte(v) - err := proto.Unmarshal(b, s.dst) - if err != nil { - return err - } - s.v.b = b - s.v.s = "" - return nil -} - -func (s *protoSink) SetProto(m proto.Message) error { - b, err := proto.Marshal(m) - if err != nil { - return err - } - // TODO(bradfitz): optimize for same-task case more and write - // right through? would need to document ownership rules at - // the same time. but then we could just assign *dst = *m - // here. This works for now: - err = proto.Unmarshal(b, s.dst) - if err != nil { - return err - } - s.v.b = b - s.v.s = "" - return nil -} - -// AllocatingByteSliceSink returns a Sink that allocates -// a byte slice to hold the received value and assigns -// it to *dst. The memory is not retained by groupcache. -func AllocatingByteSliceSink(dst *[]byte) Sink { - return &allocBytesSink{dst: dst} -} - -type allocBytesSink struct { - dst *[]byte - v ByteView -} - -func (s *allocBytesSink) view() (ByteView, error) { - return s.v, nil -} - -func (s *allocBytesSink) setView(v ByteView) error { - if v.b != nil { - *s.dst = cloneBytes(v.b) - } else { - *s.dst = []byte(v.s) - } - s.v = v - return nil -} - -func (s *allocBytesSink) SetProto(m proto.Message) error { - b, err := proto.Marshal(m) - if err != nil { - return err - } - return s.setBytesOwned(b) -} - -func (s *allocBytesSink) SetBytes(b []byte) error { - return s.setBytesOwned(cloneBytes(b)) -} - -func (s *allocBytesSink) setBytesOwned(b []byte) error { - if s.dst == nil { - return errors.New("nil AllocatingByteSliceSink *[]byte dst") - } - *s.dst = cloneBytes(b) // another copy, protecting the read-only s.v.b view - s.v.b = b - s.v.s = "" - return nil -} - -func (s *allocBytesSink) SetString(v string) error { - if s.dst == nil { - return errors.New("nil AllocatingByteSliceSink *[]byte dst") - } - *s.dst = []byte(v) - s.v.b = nil - s.v.s = v - return nil -} - -// TruncatingByteSliceSink returns a Sink that writes up to len(*dst) -// bytes to *dst. If more bytes are available, they're silently -// truncated. If fewer bytes are available than len(*dst), *dst -// is shrunk to fit the number of bytes available. 
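A usage sketch tying the sinks above to `Group.Get`, with made-up group and key names: each call populates whatever destination the chosen Sink wraps, and `TruncatingByteSliceSink` caps the write at the slice's current length, as documented above.

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache"
)

func main() {
	g := groupcache.NewGroup("sinkDemo", 1<<20,
		groupcache.GetterFunc(func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			// A Getter calls exactly one Set method on success.
			return dest.SetString("value-for-" + key)
		}))

	var s string
	if err := g.Get(nil, "k", groupcache.StringSink(&s)); err != nil {
		panic(err)
	}

	buf := make([]byte, 4) // room for only four bytes
	if err := g.Get(nil, "k", groupcache.TruncatingByteSliceSink(&buf)); err != nil {
		panic(err)
	}
	fmt.Printf("%s / %s\n", s, buf) // value-for-k / valu
}
```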
-func TruncatingByteSliceSink(dst *[]byte) Sink { - return &truncBytesSink{dst: dst} -} - -type truncBytesSink struct { - dst *[]byte - v ByteView -} - -func (s *truncBytesSink) view() (ByteView, error) { - return s.v, nil -} - -func (s *truncBytesSink) SetProto(m proto.Message) error { - b, err := proto.Marshal(m) - if err != nil { - return err - } - return s.setBytesOwned(b) -} - -func (s *truncBytesSink) SetBytes(b []byte) error { - return s.setBytesOwned(cloneBytes(b)) -} - -func (s *truncBytesSink) setBytesOwned(b []byte) error { - if s.dst == nil { - return errors.New("nil TruncatingByteSliceSink *[]byte dst") - } - n := copy(*s.dst, b) - if n < len(*s.dst) { - *s.dst = (*s.dst)[:n] - } - s.v.b = b - s.v.s = "" - return nil -} - -func (s *truncBytesSink) SetString(v string) error { - if s.dst == nil { - return errors.New("nil TruncatingByteSliceSink *[]byte dst") - } - n := copy(*s.dst, v) - if n < len(*s.dst) { - *s.dst = (*s.dst)[:n] - } - s.v.b = nil - s.v.s = v - return nil -} diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore deleted file mode 100644 index 8f5b596b14..0000000000 --- a/vendor/github.com/golang/protobuf/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -.DS_Store -*.[568ao] -*.ao -*.so -*.pyc -._* -.nfs.* -[568a].out -*~ -*.orig -core -_obj -_test -_testmain.go -protoc-gen-go/testdata/multi/*.pb.go -_conformance/_conformance diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml deleted file mode 100644 index 93c67805b9..0000000000 --- a/vendor/github.com/golang/protobuf/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: -- 1.6.x -- 1.7.x -- 1.8.x -- 1.9.x - -install: - - go get -v -d -t github.com/golang/protobuf/... - - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip - - unzip /tmp/protoc.zip -d $HOME/protoc - -env: - - PATH=$HOME/protoc/bin:$PATH - -script: - - make all test diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 1b1b1921ef..0f646931a4 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf deleted file mode 100644 index 15071de101..0000000000 --- a/vendor/github.com/golang/protobuf/Make.protobuf +++ /dev/null @@ -1,40 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Includable Makefile to add a rule for generating .pb.go files from .proto files -# (Google protocol buffer descriptions). -# Typical use if myproto.proto is a file in package mypackage in this directory: -# -# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf - -%.pb.go: %.proto - protoc --go_out=. $< - diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile deleted file mode 100644 index a1421d8b73..0000000000 --- a/vendor/github.com/golang/protobuf/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -all: install - -install: - go install ./proto ./jsonpb ./ptypes - go install ./protoc-gen-go - -test: - go test ./proto ./jsonpb ./ptypes - make -C protoc-gen-go/testdata test - -clean: - go clean ./... - -nuke: - go clean -i ./... - -regenerate: - make -C protoc-gen-go/descriptor regenerate - make -C protoc-gen-go/plugin regenerate - make -C protoc-gen-go/testdata regenerate - make -C proto/testdata regenerate - make -C jsonpb/jsonpb_test_proto regenerate - make -C _conformance regenerate diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md deleted file mode 100644 index 9c4c815c02..0000000000 --- a/vendor/github.com/golang/protobuf/README.md +++ /dev/null @@ -1,244 +0,0 @@ -# Go support for Protocol Buffers - -[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) -[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf) - -Google's data interchange format. -Copyright 2010 The Go Authors. -https://github.com/golang/protobuf - -This package and the code it generates requires at least Go 1.4. - -This software implements Go bindings for protocol buffers. For -information about protocol buffers themselves, see - https://developers.google.com/protocol-buffers/ - -## Installation ## - -To use this software, you must: -- Install the standard C++ implementation of protocol buffers from - https://developers.google.com/protocol-buffers/ -- Of course, install the Go compiler and tools from - https://golang.org/ - See - https://golang.org/doc/install - for details or, if you are using gccgo, follow the instructions at - https://golang.org/doc/install/gccgo -- Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. - The compiler plugin, protoc-gen-go, will be installed in $GOBIN, - defaulting to $GOPATH/bin. It must be in your $PATH for the protocol - compiler, protoc, to find it. - -This software has two parts: a 'protocol compiler plugin' that -generates Go source files that, once compiled, can access and manage -protocol buffers; and a library that implements run-time support for -encoding (marshaling), decoding (unmarshaling), and accessing protocol -buffers. - -There is support for gRPC in Go using protocol buffers. -See the note at the bottom of this file for details. - -There are no insertion points in the plugin. - - -## Using protocol buffers with Go ## - -Once the software is installed, there are two steps to using it. -First you must compile the protocol buffer definitions and then import -them, with the support library, into your program. - -To compile the protocol buffer definition, run protoc with the --go_out -parameter set to the directory you want to output the Go code to. - - protoc --go_out=. *.proto - -The generated files will be suffixed .pb.go. See the Test code below -for an example using such a file. - - -The package comment for the proto library contains text describing -the interface provided in Go for protocol buffers. Here is an edited -version. - -========== - -The proto package converts data structures to and from the -wire format of protocol buffers. It works in concert with the -Go source code generated for .proto files by the protocol compiler. 
- -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - Helpers for getting values are superseded by the - GetFoo methods and their use is deprecated. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed with the enum's type name. Enum types have - a String method, and a Enum method to assist in message construction. - - Nested groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -Consider file test.proto, containing - -```proto - syntax = "proto2"; - package example; - - enum FOO { X = 17; }; - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } -``` - -To create and play with a Test object from the example package, - -```go - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - "path/to/example" - ) - - func main() { - test := &example.Test { - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &example.Test_OptionalGroup { - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &example.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. 
- } -``` - -## Parameters ## - -To pass extra parameters to the plugin, use a comma-separated -parameter list separated from the output directory by a colon: - - - protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto - - -- `import_prefix=xxx` - a prefix that is added onto the beginning of - all imports. Useful for things like generating protos in a - subdirectory, or regenerating vendored protobufs in-place. -- `import_path=foo/bar` - used as the package if no input files - declare `go_package`. If it contains slashes, everything up to the - rightmost slash is ignored. -- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to - load. The only plugin in this repo is `grpc`. -- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is - associated with Go package quux/shme. This is subject to the - import_prefix parameter. - -## gRPC Support ## - -If a proto file specifies RPC services, protoc-gen-go can be instructed to -generate code compatible with gRPC (http://www.grpc.io/). To do this, pass -the `plugins` parameter to protoc-gen-go; the usual way is to insert it into -the --go_out argument to protoc: - - protoc --go_out=plugins=grpc:. *.proto - -## Compatibility ## - -The library and the generated code are expected to be stable over time. -However, we reserve the right to make breaking changes without notice for the -following reasons: - -- Security. A security issue in the specification or implementation may come to - light whose resolution requires breaking compatibility. We reserve the right - to address such security issues. -- Unspecified behavior. There are some aspects of the Protocol Buffers - specification that are undefined. Programs that depend on such unspecified - behavior may break in future releases. -- Specification errors or changes. If it becomes necessary to address an - inconsistency, incompleteness, or change in the Protocol Buffers - specification, resolving the issue could affect the meaning or legality of - existing programs. We reserve the right to address such issues, including - updating the implementations. -- Bugs. If the library has a bug that violates the specification, a program - that depends on the buggy behavior may break if the bug is fixed. We reserve - the right to fix such bugs. -- Adding methods or fields to generated structs. These may conflict with field - names that already exist in a schema, causing applications to break. When the - code generator encounters a field in the schema that would collide with a - generated field or method name, the code generator will append an underscore - to the generated field or method name. -- Adding, removing, or changing methods or fields in generated structs that - start with `XXX`. These parts of the generated code are exported out of - necessity, but should not be considered part of the public API. -- Adding, removing, or changing unexported symbols in generated code. - -Any breaking changes outside of these will be announced 6 months in advance to -protobuf@googlegroups.com. - -You should, whenever possible, use generated code created by the `protoc-gen-go` -tool built at the same commit as the `proto` package. The `proto` package -declares package-level constants in the form `ProtoPackageIsVersionX`. -Application code and generated code may depend on one of these constants to -ensure that compilation will fail if the available version of the proto library -is too old. 
Whenever we make a change to the generated code that requires newer -library support, in the same commit we will increment the version number of the -generated code and declare a new package-level constant whose name incorporates -the latest version number. Removing a compatibility constant is considered a -breaking change and would be subject to the announcement policy stated above. - -The `protoc-gen-go/generator` package exposes a plugin interface, -which is used by the gRPC code generation. This interface is not -supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 0000000000..ada2b78e89 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1271 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. 
+ EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. 
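Before the implementation, a short usage sketch of the options documented on `Marshaler`, kept self-contained by marshaling the well-known Duration type that this vendor tree also ships:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// EnumsAsInts, EmitDefaults, Indent and OrigName are the options
	// documented on the struct above.
	m := &jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}

	// Well-known types keep their special JSON form: a Duration is
	// rendered as a string such as "90s", not as an object.
	s, err := m.MarshalToString(&durpb.Duration{Seconds: 90})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "90s"
}
```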
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
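The quoted mapping is easiest to see from the caller's side. A sketch, assuming the `ptypes` helpers are also available: packing a type with a special JSON mapping (here a Duration) into an Any produces the `{"@type": ..., "value": ...}` shape.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Pack a Duration (which has a special JSON mapping) into an Any.
	any, err := ptypes.MarshalAny(ptypes.DurationProto(90 * time.Second))
	if err != nil {
		panic(err)
	}
	s, err := (&jsonpb.Marshaler{}).MarshalToString(any)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
	// {"@type":"type.googleapis.com/google.protobuf.Duration","value":"90s"}
}
```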
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + var err error + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. 
+ if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. 
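The unmarshaling side, as a usage sketch kept self-contained via the Struct well-known types: the package-level helpers below delegate to a zero-value `Unmarshaler`, and `AllowUnknownFields` relaxes the unknown-field check that otherwise fails the decode.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// UnmarshalString delegates to a zero-value Unmarshaler.
	v := &structpb.Value{}
	if err := jsonpb.UnmarshalString(`{"a": 1, "list": ["x", true]}`, v); err != nil {
		panic(err)
	}
	fmt.Println(v.GetStructValue().Fields["a"].GetNumberValue()) // 1

	// A configured Unmarshaler can be told to tolerate fields the
	// schema doesn't know instead of failing.
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(`["nested", {"ok": true}]`), &structpb.Value{}); err != nil {
		panic(err)
	}
}
```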
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. 
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // a non-blank PkgPath means the field is unexported; skip it + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}.
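(The oneof walk of checkRequiredFields resumes just below.) The errWriter declared earlier in this hunk is the pattern from the linked errors-are-values post: the first failed write is recorded, every later write becomes a no-op, and a long run of writes then needs only a single error check at the end. A usage sketch, assuming a strings.Builder stands in for the real output stream:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// errWriter, as declared above: the first failed write sticks in w.err,
// and every subsequent write is skipped.
type errWriter struct {
	writer io.Writer
	err    error
}

func (w *errWriter) write(str string) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write([]byte(str))
}

func main() {
	var b strings.Builder
	w := &errWriter{writer: &b}
	// A run of writes with no error checks between them...
	w.write(`{"host":`)
	w.write(`"cauchy"`)
	w.write(`}`)
	// ...and a single check at the end.
	if w.err != nil {
		fmt.Fprintln(os.Stderr, w.err)
		return
	}
	fmt.Println(b.String()) // {"host":"cauchy"}
}
```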
+ if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a93..0000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go deleted file mode 100644 index 41451a4073..0000000000 --- a/vendor/github.com/golang/protobuf/proto/all_test.go +++ /dev/null @@ -1,2278 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/golang/protobuf/proto" - . 
"github.com/golang/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) -func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // 
t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). - // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - errType reflect.Type - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since the Marshal method returned bytes, they should be written to the - // buffer. (For efficiency, we assume that Marshal implementations are - // always correct w.r.t. RequiredNotSetError and output.) - want: []byte{5, 6, 7}, - errType: reflect.TypeOf(errors.New("some marshal err")), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. 
- want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - errType: reflect.TypeOf(&RequiredNotSetError{}), - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if reflect.TypeOf(err) != test.errType { - t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - if size := Size(test.m); size != len(b.Bytes()) { - t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) - } - - m, mErr := Marshal(test.m) - if !bytes.Equal(b.Bytes(), m) { - t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) - } - if !reflect.DeepEqual(err, mErr) { - t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", - test.name, fmt.Sprint(mErr), fmt.Sprint(err)) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. 
-func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? -func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
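Every annotated hex run in these expected strings decodes mechanically: a varint key whose value is (field_number << 3) | wire_type, followed by the payload. That is how comments like "0807" = field 1, encoding 0, value 7 were derived. A sketch reproducing a few of the annotations, with key as an illustrative helper rather than part of the package; TestEncodeDecode2, introduced by the comment above, follows after it.

```go
package main

import "fmt"

// key computes the wire-format field key, matching the "field N,
// encoding W" annotations that label every hex run above.
func key(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

func main() {
	fmt.Printf("%#x\n", key(1, 0))  // 0x8  -> "0807": field 1, varint, value 7
	fmt.Printf("%#x\n", key(10, 0)) // 0x50 -> "5001": field 10, varint, value 1
	fmt.Printf("%#x\n", key(13, 5)) // 0x6d -> "6d20000000": field 13, fixed32
}
```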
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
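Before the unknown-field test that the comment above introduces, it is worth spelling out where the zigzag columns come from: sint32/sint64 values are remapped so that small magnitudes of either sign stay small varints, which is why the tests annotate 0x3f as -32, 0x40 as 32 and 0x7f as -64. A sketch of the mapping, with zigzag32/unzigzag32 as illustrative names:

```go
package main

import "fmt"

// zigzag32 interleaves signed values so small magnitudes of either sign
// stay small varints: 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ...
func zigzag32(n int32) uint32 {
	return uint32(n<<1) ^ uint32(n>>31)
}

// unzigzag32 inverts the mapping.
func unzigzag32(u uint32) int32 {
	return int32(u>>1) ^ -int32(u&1)
}

func main() {
	fmt.Printf("%#x %#x\n", zigzag32(32), zigzag32(-32)) // 0x40 0x3f, as annotated
	fmt.Println(unzigzag32(0x7f))                        // -64, the zigzag64 twin
}
```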
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - _ = fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
-func TestRequiredFieldEnforcementGroups(t *testing.T) { - pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} - if _, err := Marshal(pb); err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { - t.Errorf("marshal: bad error type: %v", err) - } - - buf := []byte{11, 12} - if err := Unmarshal(buf, pb); err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - { - var m *GoEnum - if _, err := Marshal(m); err != ErrNil { - t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) - } - } - - { - m := &Communique{Union: &Communique_Msg{nil}} - if _, err := Marshal(m); err == nil || err == ErrNil { - t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) - } - } -} - -// A type that implements the Marshaler interface, but is not nillable. -type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
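The varint overflow test that follows feeds ten 0x80 continuation bytes before a terminator: each varint byte carries seven payload bits, so a uint64 fits in at most ten bytes and an eleventh must be rejected rather than silently wrapped. A sketch of the encoder side makes the bound visible; encodeVarint here is an illustrative helper, not the package's.

```go
package main

import "fmt"

// encodeVarint emits a uint64 as base-128 groups, low group first; the
// high bit of every byte except the last marks a continuation.
func encodeVarint(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	fmt.Printf("% x\n", encodeVarint(3232)) // a0 19, as in the "78a019" lines
	fmt.Println(len(encodeVarint(1 << 63))) // 10: a uint64 never needs more
}
```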
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
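// Aside — a minimal sketch (not vendored code) of the contract that
// TestMergeMessages above verifies: Unmarshal resets its target before
// decoding, so repeated calls converge, while UnmarshalMerge keeps existing
// data and appends repeated fields across calls.
func unmarshalVsUnmarshalMergeSketch() {
	src := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
	data, _ := Marshal(src)

	dst := new(MessageList)
	Unmarshal(data, dst)
	Unmarshal(data, dst)          // internal Reset: still one element
	fmt.Println(len(dst.Message)) // 1

	dst = new(MessageList)
	UnmarshalMerge(data, dst)
	UnmarshalMerge(data, dst)     // no Reset: elements accumulate
	fmt.Println(len(dst.Message)) // 2
}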
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m1 := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.MsgMapping[1]; !ok { - t.Error("msg_mapping[1] not present") - } else if v != nil { - t.Errorf("msg_mapping[1] not nil: %v", v) - } -} - -func TestMapFieldWithNilBytes(t *testing.T) { - m1 := &MessageWithMap{ - ByteMapping: map[bool][]byte{ - false: []byte{}, - true: nil, - }, - } - n := Size(m1) - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if n != len(b) { 
- t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.ByteMapping[false]; !ok { - t.Error("byte_mapping[false] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[false] not empty: %#v", v) - } - if v, ok := m2.ByteMapping[true]; !ok { - t.Error("byte_mapping[true] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[true] not empty: %#v", v) - } -} - -func TestDecodeMapFieldMissingKey(t *testing.T) { - b := []byte{ - 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes - // no key - 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing key: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) - } -} - -func TestDecodeMapFieldMissingValue(t *testing.T) { - b := []byte{ - 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes - 0x08, 0x01, // varint key, value 1 - // no value - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing value: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) - } -} - -func TestOneof(t *testing.T) { - m := &Communique{} - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal of empty message with oneof: %v", err) - } - if len(b) != 0 { - t.Errorf("Marshal of empty message yielded too many bytes: %v", b) - } - - m = &Communique{ - Union: &Communique_Name{"Barry"}, - } - - // Round-trip. - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof: %v", err) - } - if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) - t.Errorf("Incorrect marshal of message with oneof: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof: %v", err) - } - if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { - t.Errorf("After round trip, Union = %+v", m.Union) - } - if name := m.GetName(); name != "Barry" { - t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") - } - - // Let's try with a message in the oneof. - m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof set to message: %v", err) - } - if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) - t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof set to message: %v", err) - } - ss, ok := m.Union.(*Communique_Msg) - if !ok || ss.Msg.GetStringField() != "deep deep string" { - t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) - } -} - -func TestInefficientPackedBool(t *testing.T) { - // https://github.com/golang/protobuf/issues/76 - inp := []byte{ - 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes - // Usually a bool should take a single byte, - // but it is permitted to be any varint. 
- 0xb9, 0x30, - } - if err := Unmarshal(inp, new(MoreRepeated)); err != nil { - t.Error(err) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. - pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go deleted file mode 100644 index 1a3c22ed41..0000000000 --- a/vendor/github.com/golang/protobuf/proto/any_test.go +++ 
/dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - pb "github.com/golang/protobuf/proto/proto3_proto" - testpb "github.com/golang/protobuf/proto/testdata" - anypb "github.com/golang/protobuf/ptypes/any" -) - -var ( - expandedMarshaler = proto.TextMarshaler{ExpandAny: true} - expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} -) - -// anyEqual reports whether two messages which may be google.protobuf.Any or may -// contain google.protobuf.Any fields are equal. We can't use proto.Equal for -// comparison, because semantically equivalent messages may be marshaled to -// binary in different tag order. Instead, trust that TextMarshaler with -// ExpandAny option works and compare the text marshaling results. -func anyEqual(got, want proto.Message) bool { - // if messages are proto.Equal, no need to marshal. 
- if proto.Equal(got, want) { - return true - } - g := expandedMarshaler.Text(got) - w := expandedMarshaler.Text(want) - return g == w -} - -type golden struct { - m proto.Message - t, c string -} - -var goldenMessages = makeGolden() - -func makeGolden() []golden { - nested := &pb.Nested{Bunny: "Monty"} - nb, err := proto.Marshal(nested) - if err != nil { - panic(err) - } - m1 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m2 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m3 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, - } - m4 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, - } - m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} - - any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} - proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) - proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) - any1b, err := proto.Marshal(any1) - if err != nil { - panic(err) - } - any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} - proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) - any2b, err := proto.Marshal(any2) - if err != nil { - panic(err) - } - m6 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - ManyThings: []*anypb.Any{ - &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, - &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - }, - } - - const ( - m1Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m2Golden = ` -name: "David" -result_count: 47 -anything: < - ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m3Golden = ` -name: "David" -result_count: 47 -anything: < - ["type.googleapis.com/\"/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m4Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m5Golden = ` -[type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" -> -` - m6Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 42 - bikeshed: GREEN - rep_bytes: "roboto" - [testdata.Ext.more]: < - data: "baz" - > - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -` - ) - return []golden{ - {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, - {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, - {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, - {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, - {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, - {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, - } -} - -func TestMarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { - t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) - } - if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { - t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) - } - } -} - -func TestUnmarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - want := tt.m - got := proto.Clone(tt.m) - got.Reset() - if err := proto.UnmarshalText(tt.t, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) - } - got.Reset() - if err := proto.UnmarshalText(tt.c, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) - } - } -} - -func TestMarshalUnknownAny(t *testing.T) { - m := &pb.Message{ - Anything: &anypb.Any{ - TypeUrl: "foo", - Value: []byte("bar"), - }, - } - want := `anything: < - type_url: "foo" - value: "bar" -> -` - got := expandedMarshaler.Text(m) - if got != want { - t.Errorf("got\n`%s`\nwant\n`%s`", got, want) - } -} - -func TestAmbiguousAny(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - type_url: "ttt/proto3_proto.Nested" - value: "\n\x05Monty" - `, pb) - t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) - if err != nil { - t.Errorf("failed to parse ambiguous Any message: %v", err) - } -} - -func TestUnmarshalOverwriteAny(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 7: Any message unpacked multiple times, or "type_url" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} - -func TestUnmarshalAnyMixAndMatch(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - value: "\n\x05Monty" - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 5: Any message unpacked multiple times, or "value" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575b35..3cd3249f70 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. 
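// Aside — what the Clone rewrite in this hunk amounts to, as a standalone
// sketch rather than the vendored code itself: allocate a fresh message of
// the same concrete type, then Merge the source into it. Because Merge now
// consults the Merger/XXX_Merge fast paths first, Clone inherits them too.
func cloneViaMergeSketch(src Message) Message {
	in := reflect.ValueOf(src)
	if in.IsNil() {
		return src // a typed nil "clones" to itself, mirroring Clone above
	}
	dst := reflect.New(in.Type().Elem()).Interface().(Message)
	Merge(dst, src)
	return dst
}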
- mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index f607ff49eb..0000000000 --- a/vendor/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
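// Aside — an illustrative helper, not part of the vendored test: the &s[0]
// comparisons that follow are a cheap aliasing probe. Two non-empty slices
// share backing storage exactly when their first elements share an address,
// so a correct deep copy must make every such probe report false.
func sharesBacking(a, b []string) bool {
	return len(a) > 0 && len(b) > 0 && &a[0] == &b[0]
}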
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(3.0), - Exact: proto.Bool(true), - }, // the entire message should be overwritten - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. 
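// Aside — a compact sketch (not vendored code) of the Merge rules the
// remaining table rows encode: a oneof set in src replaces dst's oneof
// wholesale, map entries are replaced per key, and proto3 zero values such
// as empty-but-non-nil bytes never overwrite data already present in dst.
func mergeOneofSketch() {
	dst := &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}
	src := &pb.Communique{Union: &pb.Communique_Number{41}}
	proto.Merge(dst, src)
	// dst.Union is now &pb.Communique_Number{41}: assignment, not a field-wise merge.
}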
- { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, - // Oneof fields should merge by assignment. - { - src: &pb.Communique{ - Union: &pb.Communique_Number{41}, - }, - dst: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Number{41}, - }, - }, - // Oneof nil is the same as not set. - { - src: &pb.Communique{}, - dst: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, - }, - { - src: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, // replace - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert - }, - }, - dst: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep - }, - }, - want: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, - }, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f9..63b0f08bef 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -192,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow @@ -267,9 +260,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +301,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. 
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +333,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +349,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +374,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +395,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
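// Aside — the required-field bookkeeping deleted above, reduced to an
// illustrative sketch: a uint64 bitmap tracks required tags 1..64 without
// allocating, so a required field that appears twice is only counted once;
// tags above 64 fall back to a plain, admittedly imprecise, decrement.
func requiredFieldsSatisfied(seenTags []int, required int) bool {
	var seen uint64
	for _, tag := range seenTags {
		if tag >= 1 && tag <= 64 {
			mask := uint64(1) << uint(tag-1)
			if seen&mask != 0 {
				continue // duplicate of an already-counted required field
			}
			seen |= mask
		}
		required--
	}
	return required <= 0
}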
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
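// Aside — an illustrative encoder (not vendored) for the restricted wire
// format that dec_new_map below parses: each map entry travels as a nested
// message whose key is field 1 and whose value is field 2. Hand-encoding the
// single entry {1: "Rob"} of a map<int32, string> sitting at field 1:
func encodeMapEntrySketch() []byte {
	entry := NewBuffer(nil)
	entry.EncodeVarint(uint64(1<<3 | WireVarint)) // field 1 (key), varint
	entry.EncodeVarint(1)
	entry.EncodeVarint(uint64(2<<3 | WireBytes)) // field 2 (value), length-delimited
	entry.EncodeStringBytes("Rob")

	msg := NewBuffer(nil)
	msg.EncodeVarint(uint64(1<<3 | WireBytes)) // the map field's own tag
	msg.EncodeRawBytes(entry.Bytes())
	return msg.Bytes() // "\n\a\b\x01\x12\x03Rob", the bytes TestMapFieldMarshal expects
}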
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
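// Aside — why the group decoders below loop until they see WireEndGroup:
// groups are bracketed by start/end tags rather than length-prefixed. An
// illustrative helper (not vendored) producing both delimiters; for field 70
// it yields 0xb3 0x04 and 0xb4 0x04, the "b304"/"b404" pair visible in
// TestRequiredNotSetError's expected encoding.
func groupDelimiters(field int) (start, end []byte) {
	start = EncodeVarint(uint64(field<<3 | WireStartGroup))
	end = EncodeVarint(uint64(field<<3 | WireEndGroup))
	return start, end
}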
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go deleted file mode 100644 index 2c4c31d127..0000000000 --- a/vendor/github.com/golang/protobuf/proto/decode_test.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "fmt" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" -) - -var ( - bytesBlackhole []byte - msgBlackhole = new(tpb.Message) -) - -// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and -// 2 bytes long). -func BenchmarkVarint32ArraySmall(b *testing.B) { - for i := uint(1); i <= 10; i++ { - dist := genInt32Dist([7]int{0, 3, 1}, 1<>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. 
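The replacement `sizeVarint` above trades the shift-and-count loop for a branch ladder over the 7-bit group boundaries; both count how many base-128 digits `x` needs. A quick sketch checking the two against each other at every boundary value (both functions are reproduced here only for comparison):

```go
package main

import "fmt"

// sizeVarintLoop is the version being removed; sizeVarintSwitch is its
// replacement. Each returns the byte length of the varint encoding of x.
func sizeVarintLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

func sizeVarintSwitch(x uint64) int {
	switch {
	case x < 1<<7:
		return 1
	case x < 1<<14:
		return 2
	case x < 1<<21:
		return 3
	case x < 1<<28:
		return 4
	case x < 1<<35:
		return 5
	case x < 1<<42:
		return 6
	case x < 1<<49:
		return 7
	case x < 1<<56:
		return 8
	case x < 1<<63:
		return 9
	}
	return 10
}

func main() {
	// Probe both sides of every boundary: 2^(7k)-1 and 2^(7k).
	for k := uint(1); k <= 9; k++ {
		for _, x := range []uint64{1<<(7*k) - 1, 1 << (7 * k)} {
			if a, b := sizeVarintLoop(x), sizeVarintSwitch(x); a != b {
				fmt.Printf("mismatch at %d: %d vs %d\n", x, a, b)
			}
		}
	}
	fmt.Println("max size:", sizeVarintSwitch(^uint64(0))) // 10
}
```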
-func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
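The recurring comment `permit sign extension to use full 64-bit range` in these encoders is worth unpacking: a negative `int32` is widened through `int64` to `uint64` before varint encoding so it decodes back with the right sign, at the cost of a full ten-byte varint; the zigzag (`sint32`) encoders earlier in this file exist precisely to avoid that. A small sketch (Go 1.19+ for `binary.AppendUvarint`):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// enc_int32 widens int32 -> int64 -> uint64, so a negative value
	// sign-extends and always costs the maximum varint length.
	x := int32(-1)
	fmt.Println(len(binary.AppendUvarint(nil, uint64(int64(x))))) // 10

	// The zigzag mapping used by EncodeZigzag32 sends small magnitudes
	// to small unsigned values first: -1 -> 1, 1 -> 2, -2 -> 3, ...
	zz := uint64((uint32(x) << 1) ^ uint32(x>>31))
	fmt.Println(len(binary.AppendUvarint(nil, zz))) // 1
}
```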
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
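The new `EncodeMessage` body above is a two-pass framing: measure with `Size`, write the length varint, then marshal the payload behind it, which avoids patching the length in after the fact. Stripped of the proto specifics, the shape is (a minimal sketch; `frame` is a hypothetical helper):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame mirrors the new EncodeMessage: emit the payload's byte length
// as a varint, then the payload itself.
func frame(payload []byte) []byte {
	out := binary.AppendUvarint(nil, uint64(len(payload)))
	return append(out, payload...)
}

func main() {
	fmt.Printf("% x\n", frame([]byte{0x08, 0x96, 0x01})) // 03 08 96 01
}
```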
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
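`enc_slice_packed_int32` above shows why packed encoding is the cheaper form for numeric repeated fields: the tag is written once for the whole field, followed by a byte-length prefix and the elements back to back, instead of one tag per element. A self-contained sketch of the same layout (`packInt32s` and the field number 4 are illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packInt32s emits one tag (wire type 2, length-delimited), a byte
// length, then the varints concatenated, as the packed encoder does.
func packInt32s(fieldNum int, s []int32) []byte {
	var body []byte
	for _, x := range s {
		body = binary.AppendUvarint(body, uint64(int64(x))) // sign-extend, as above
	}
	out := binary.AppendUvarint(nil, uint64(fieldNum)<<3|2)
	out = binary.AppendUvarint(out, uint64(len(body)))
	return append(out, body...)
}

func main() {
	fmt.Printf("% x\n", packInt32s(4, []int32{3, 270, 86942}))
	// 22 06 03 8e 02 9e a7 05: the tag is paid once, not per element.
}
```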
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
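The group encoders above use the one wire form that needs no length prefix: the body is bracketed by start-group and end-group tags (wire types 3 and 4), so the encoder never has to know the body size up front, unlike `enc_len_struct`. Sketched with a hypothetical `wrapGroup` helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	wireStartGroup = 3
	wireEndGroup   = 4
)

// wrapGroup brackets an already-encoded body with the start/end group
// tags for the given (hypothetical) field number.
func wrapGroup(fieldNum int, body []byte) []byte {
	out := binary.AppendUvarint(nil, uint64(fieldNum)<<3|wireStartGroup)
	out = append(out, body...)
	return binary.AppendUvarint(out, uint64(fieldNum)<<3|wireEndGroup)
}

func main() {
	fmt.Printf("% x\n", wrapGroup(1, []byte{0x08, 0x01})) // 0b 08 01 0c
}
```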
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
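`enc_map_body` above makes extension output deterministic by sorting the extension field numbers before emitting their pre-encoded bytes; note the contrast with `enc_new_map`, which deliberately leaves map keys unsorted on this code path. The sorting idiom in isolation (the byte values are made-up placeholders for two pre-encoded extensions):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Collect the int32 field numbers, sort them as ints, then emit the
	// stored encodings in that order, exactly as enc_map_body does.
	enc := map[int32][]byte{
		200: {0xC2, 0x0C, 0x00}, // illustrative bytes for field 200
		100: {0xA2, 0x06, 0x00}, // illustrative bytes for field 100
	}
	keys := make([]int, 0, len(enc))
	for k := range enc {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)
	var out []byte
	for _, k := range keys {
		out = append(out, enc[int32(k)]...)
	}
	fmt.Printf("% x\n", out) // field 100 bytes always precede field 200 bytes
}
```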
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
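`mapEncodeScratch` above is all about addressability: the per-element encoders expect a `**T`-shaped `structPointer`, while `reflect` map iteration hands back bare, non-addressable `T` values, so the function builds an addressable `K` cell plus an addressable `*K` that permanently points at it and only ever rewrites the cell. The same pattern reduced to stdlib `reflect` (string keys chosen purely as an example):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	keyType := reflect.TypeOf("")                        // K = string
	keycopy := reflect.New(keyType).Elem()               // addressable K
	keyptr := reflect.New(reflect.PtrTo(keyType)).Elem() // addressable *K
	keyptr.Set(keycopy.Addr())                           // wire *K -> K once

	for _, k := range []string{"a", "b"} {
		keycopy.Set(reflect.ValueOf(k)) // only the K cell changes per entry
		fmt.Println(keyptr.Elem().Interface())
	}
}
```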
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go deleted file mode 100644 index a7209475f1..0000000000 --- a/vendor/github.com/golang/protobuf/proto/encode_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
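`enc_len_thing` above implements the classic reserve-and-shift length prefix: guess four bytes for the length, encode the body in place, then slide the body left or right if the real varint is shorter or longer than the guess. A compact sketch of the same maneuver (`prefixLen` is a hypothetical helper; the shrink branch is the common case, the grow branch only fires for bodies of 256 MiB or more):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// prefixLen assumes the caller reserved exactly four bytes at iLen and
// then encoded the body in place right after them.
func prefixLen(buf []byte, iLen int) []byte {
	iMsg := iLen + 4
	lMsg := len(buf) - iMsg
	lenBytes := binary.AppendUvarint(nil, uint64(lMsg))
	switch x := len(lenBytes) - 4; {
	case x < 0: // real prefix is shorter: move body x bytes left, shrink
		copy(buf[iMsg+x:], buf[iMsg:])
		buf = buf[:len(buf)+x]
	case x > 0: // real prefix is longer: grow, move body x bytes right
		buf = append(buf, make([]byte, x)...)
		copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
	}
	copy(buf[iLen:], lenBytes)
	return buf
}

func main() {
	buf := []byte{0x0A}                 // enclosing tag, already written
	iLen := len(buf)                    // reservation starts here
	buf = append(buf, 0, 0, 0, 0)       // the four reserved bytes
	buf = append(buf, 0x08, 0x96, 0x01) // the "encoded body"
	fmt.Printf("% x\n", prefixLen(buf, iLen)) // 0a 03 08 96 01
}
```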
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "strconv" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" - "github.com/golang/protobuf/ptypes" -) - -var ( - blackhole []byte -) - -// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the -// same. -func BenchmarkAny(b *testing.B) { - data := make([]byte, 1<<20) - quantum := 1 << 10 - for i := uint(0); i <= 10; i++ { - b.Run(strconv.Itoa(quantum<= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +445,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +466,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,16 +495,16 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -544,14 +516,14 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } // ClearAllExtensions clears all extensions from pb. 
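One small but useful change above: `SetExtension`'s type-mismatch error now names both the supplied and the expected type instead of the bare `proto: bad extension value type`. The check itself is a strict `reflect.TypeOf` equality against the `ExtensionType` prototype, sketched here (`checkValueType` is an illustrative stand-in, not the exported API):

```go
package main

import (
	"fmt"
	"reflect"
)

// checkValueType requires the value's concrete type to match the
// ExtensionType prototype exactly, and reports both on mismatch.
func checkValueType(prototype, value interface{}) error {
	if want, got := reflect.TypeOf(prototype), reflect.TypeOf(value); want != got {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, prototype)
	}
	return nil
}

func main() {
	fmt.Println(checkValueType((*bool)(nil), "yes"))      // names string vs *bool
	fmt.Println(checkValueType((*bool)(nil), new(bool))) // <nil>
}
```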
func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() @@ -585,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index b6d9114c56..0000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,536 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
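The `extensionAsLegacyType`/`extensionAsStorageType` pair above bridges two representations: the public extension API has always trafficked in `*T` for primitive values, while the newer storage keeps bare `T`. The scalar half of the legacy direction reduces to wrapping the value in a freshly allocated pointer (a trimmed sketch; slices and the reverse direction are handled analogously above):

```go
package main

import (
	"fmt"
	"reflect"
)

// toLegacy wraps a primitive value in a new pointer, as
// extensionAsLegacyType does for the scalar kinds.
func toLegacy(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32,
		reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		return rv2.Interface()
	}
	return v
}

func main() {
	v := toLegacy(int32(7))
	fmt.Printf("%T %v\n", v, *v.(*int32)) // *int32 7
}
```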
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" - "golang.org/x/sync/errgroup" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestExtensionDescsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{Count: proto.Int32(0)} - extdesc1 := pb.E_Ext_More - if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { - t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) - } - - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - extdesc2 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 123456789, - Name: "a.b", - Tag: "varint,123456789,opt", - } - ext2 := proto.Bool(false) - if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { - t.Fatalf("Could not set ext2: %s", err) - } - - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Could not marshal msg: %v", err) - } - if err := proto.Unmarshal(b, msg); err != nil { - t.Fatalf("Could not unmarshal into msg: %v", err) - } - - descs, err := proto.ExtensionDescs(msg) - if err != nil { - t.Fatalf("proto.ExtensionDescs: got error %v", err) - } - sortExtDescs(descs) - wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} - if !reflect.DeepEqual(descs, wantDescs) { - t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) - } -} - -type ExtensionDescSlice []*proto.ExtensionDesc - -func (s ExtensionDescSlice) Len() int { return len(s) } -func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } -func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func sortExtDescs(s []*proto.ExtensionDesc) { - sort.Sort(ExtensionDescSlice(s)) -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - 
t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). - } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. 
- // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tye.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. - valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value. - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal 
is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} - -func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { - // Add a repeated extension to the result. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - // Marshal message with a repeated extension. - msg1 := new(pb.OtherMessage) - err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) - if err != nil { - t.Fatalf("[%s] Error setting extension: %v", test.name, err) - } - b, err := proto.Marshal(msg1) - if err != nil { - t.Fatalf("[%s] Error marshaling message: %v", test.name, err) - } - - // Unmarshal and read the merged proto. - msg2 := new(pb.OtherMessage) - err = proto.Unmarshal(b, msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_RComplex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.([]*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(ext, test.ext) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) - } - } -} - -func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { - // We may see multiple instances of the same extension in the wire - // format. For example, the proto compiler may encode custom options in - // this way. Here, we verify that we merge the extensions together. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - var buf bytes.Buffer - var want pb.ComplexExtension - - // Generate a serialized representation of a repeated extension - // by catenating bytes together. - for i, e := range test.ext { - // Merge to create the wanted proto. - proto.Merge(&want, e) - - // serialize the message - msg := new(pb.OtherMessage) - err := proto.SetExtension(msg, pb.E_Complex, e) - if err != nil { - t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) - } - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) - } - buf.Write(b) - } - - // Unmarshal and read the merged proto. 
- msg2 := new(pb.OtherMessage) - err := proto.Unmarshal(buf.Bytes(), msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_Complex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.(*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(*ext, want) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) - } - } -} - -func TestClearAllExtensions(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - m := &pb.MyMessage{} - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - if !proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) - } - proto.ClearAllExtensions(m) - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } -} - -func TestMarshalRace(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - - m := &pb.MyMessage{Count: proto.Int32(4)} - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - - var g errgroup.Group - for n := 3; n > 0; n-- { - g.Go(func() error { - _, err := proto.Marshal(m) - return err - }) - } - if err := g.Wait(); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504a0..fdd328bb7f 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -273,32 +273,73 @@ import ( "sync" ) -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
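Both repeated-extension tests removed above lean on the same wire-format property: concatenating two valid encodings of a message yields a valid encoding of their merge, with the later occurrence of a non-repeated scalar winning. A toy decoder that only understands varint field 1 is enough to see it (illustrative sketch, stdlib only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	a := binary.AppendUvarint([]byte{0x08}, 7)  // field 1 = 7
	b := binary.AppendUvarint([]byte{0x08}, 11) // field 1 = 11
	buf := append(a, b...)

	// Toy scan: assumes the buffer holds only field-1 varints.
	var last uint64
	for i := 0; i < len(buf); {
		tag, n := binary.Uvarint(buf[i:])
		i += n
		if tag>>3 == 1 && tag&7 == 0 {
			v, m := binary.Uvarint(buf[i:])
			i += m
			last = v
		}
	}
	fmt.Println(last) // 11: the later occurrence overwrites the earlier
}
```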
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true } -// Set to true to enable stats collection. -const collectStats = false +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} -var stats Stats +// isNonFatal reports whether the error is either a RequiredNotSet error +// or an InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false } -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful: +// it returns false for any fatal non-nil error and true otherwise. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to @@ -309,16 +350,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +375,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API.
+// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexicographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +887,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +901,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -888,10 +940,26 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee.
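// NOTE (editor): minimal usage sketch for the SetDeterministic API added
// above; not part of the vendored patch, and `msg` stands for any generated
// proto.Message.
//
//	var b proto.Buffer
//	b.SetDeterministic(true) // map entries are emitted in sorted key order
//	if err := b.Marshal(msg); err != nil {
//	    // A RequiredNotSetError (see above) is non-fatal: b.Bytes() still
//	    // holds a usable, if incomplete, encoding.
//	    if _, ok := err.(interface{ RequiredNotSet() bool }); !ok {
//	        return err
//	    }
//	}
//	out := b.Bytes()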
+type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go deleted file mode 100644 index 313e879245..0000000000 --- a/vendor/github.com/golang/protobuf/proto/map_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package proto_test - -import ( - "fmt" - "testing" - - "github.com/golang/protobuf/proto" - ppb "github.com/golang/protobuf/proto/proto3_proto" -) - -func marshalled() []byte { - m := &ppb.IntMaps{} - for i := 0; i < 1000; i++ { - m.Maps = append(m.Maps, &ppb.IntMap{ - Rtt: map[int32]int32{1: 2}, - }) - } - b, err := proto.Marshal(m) - if err != nil { - panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) - } - return b -} - -func BenchmarkConcurrentMapUnmarshal(b *testing.B) { - in := marshalled() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } - }) -} - -func BenchmarkSequentialMapUnmarshal(b *testing.B) { - in := marshalled() - b.ResetTimer() - for i := 0; i < b.N; i++ { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index fd982decd6..f48a756761 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,12 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +89,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -147,50 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. 
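// NOTE (editor): context for the two skipVarint calls below, not part of the
// patch. A length-delimited field is laid out on the wire as
//
//	tag/wire-type varint | length varint | message bytes
//
// so skipVarint(skipVarint(e.enc)) strips the first two pieces and leaves
// only the raw message bytes that a MessageSet item carries.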
- msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -228,84 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 353a3ea769..0000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &messageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - var extensions XXX_InternalExtensions - if err := UnmarshalMessageSet(b, &extensions); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := extensions.p.extensionMap[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e16..94fa9194a8 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. 
-// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,304 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + if deref { + u = u.Elem() + } + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. 
-func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. 
+// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
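// NOTE (editor): sketch of the refactoring pattern in this file, not part of
// the patch. The old free functions keyed by structPointer, e.g.
//
//	v := structPointer_BoolVal(base, f)
//
// become methods on the new generic pointer type:
//
//	v := base.offset(f).toBool()
//
// so the table-driven encoder/decoder can share one call shape between this
// reflect-based implementation and the unsafe-based one.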
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
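// NOTE (editor): in this reflect-based implementation p.v is already a
// reflect.Value of pointer type, so it can be returned as-is; the unsafe
// variant later in this patch must instead materialize one via
// reflect.NewAt(t, p.p).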
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47c..dbfffe071b 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. 
-func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,259 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + } + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// Bool returns the address of a *bool field in the struct. 
-func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// GetStructPointer reads a *struct field in the struct. 
-func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c005..79668ff5c5 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. 
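// NOTE (editor): illustrative sketch of the tagMap idea described above, not
// the library's actual implementation. Small, dense tag numbers index a
// slice; rare large tags fall back to a map.
type tagLookup struct {
	fast []int       // fast[tag] holds fieldnum+1; 0 means absent
	slow map[int]int // tags beyond len(fast)
}

func (t *tagLookup) get(tag int) (int, bool) {
	if tag < len(t.fast) {
		v := t.fast[tag]
		return v - 1, v > 0
	}
	v, ok := t.slow[tag]
	return v, ok
}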
@@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -182,41 +139,24 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,255 +252,40 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. 
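// NOTE (editor): with the `outer` label added above, the `break outer` below
// exits the field-scanning loop once the default value has consumed the
// remaining comma-joined fields; the old bare `break` only left the switch,
// letting the loop re-parse pieces of the default value as tag options.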
p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. -func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = 
(*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. 
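			// (For illustration: a map[string]int32 value is handled as
			// *int32 here, so the same pointer-based scalar handlers can
			// be reused for map values.)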
vtype = reflect.PtrTo(vtype) } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] if p.stype != nil { if lockGetProp { @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -649,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -661,26 +343,26 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. 
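	// (Thus a recursive message, e.g. one with a field of its own type,
	// finds this placeholder entry on the nested lookup instead of
	// recursing forever.)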
propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,22 +386,19 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - + if len(oots) > 0 { // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { @@ -779,30 +447,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +470,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. 
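// Generated code typically calls this from an init function with a typed
// nil pointer, e.g. (message and package names here are illustrative):
//
//	func init() {
//		proto.RegisterType((*MyMessage)(nil), "mypkg.MyMessage")
//	}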
func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +521,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index 735837f2de..0000000000 --- a/vendor/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/proto3_proto" - tpb "github.com/golang/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestGettersForBasicTypesExist(t *testing.T) { - var m pb.Message - if got := m.GetNested().GetBunny(); got != "" { - t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) - } - if got := m.GetNested().GetCute(); got { - t.Errorf("m.GetNested().GetCute() = %t, want false", got) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. 
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a1..0000000000 --- a/vendor/github.com/golang/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index af1034dc7b..0000000000 --- a/vendor/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, - - {"oneof not set", &pb.Oneof{}}, - {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, - {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, - {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, - {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, - {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, - {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, - {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, - {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, - {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, - {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, - {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, - {"oneof bytes", &pb.Oneof{Union: 
&pb.Oneof_F_Bytes{[]byte("let go")}}}, - {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, - {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, - {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, - {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, - {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, - {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, - {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, - {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000000..5cb11fa955 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2776 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). 
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
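+	// Without the atomic load a reader could in principle observe the
+	// pointer before the marshalInfo it points at is fully published;
+	// the load pairs with the atomicStoreMarshalInfo call below.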
+ u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. 
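+				// (errLater keeps only the first RequiredNotSetError;
+				// later missing required fields are not re-reported, but
+				// every remaining field is still marshaled.)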
+ if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
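+	// (byTag compares the precomputed wiretags; oneof fields received
+	// wiretag math.MaxInt32 in computeOneofFieldInfo, so they sort
+	// after every regular field.)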
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } + sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + deref: deref, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +// wiretype returns the wire encoding of the type. 
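+// setTag combines it with the field number as uint64(tag)<<3 | wt;
+// for example, tag 49 with "bytes" encoding gives 49<<3|2 = 394.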
+func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case 
reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := 
*ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func 
sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
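+ // Varints are little-endian base-128: each byte carries seven payload bits + // and the high bit marks continuation. E.g. v = 300 (0b1_0010_1100) encodes + // as {0xAC, 0x02}; values below 128 take one byte, a full uint64 takes ten.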
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
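+ // Note that a negative int32 sign-extends to 64 bits here, so it always + // costs ten wire bytes; sint32 fields avoid this via the zigzag mapping + // below (0->0, -1->1, 1->2, -2->3), which keeps small magnitudes small.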
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
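+ // Length-delimited layout: tag varint, byte-count varint, then the raw + // payload, e.g. field 1 holding "hi" becomes {0x0A, 0x02, 'h', 'i'}. Packed + // repeated scalars reuse this framing via wiretag&^7|WireBytes above.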
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
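+// Each element is framed as tag, varint length, then payload; the length is +// read from u.cachedsize, which the preceding size pass fills in.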
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
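+// Extensions still in encoded form (e.enc) are copied through verbatim, while +// decoded values are re-marshaled so mutations since the last call are kept.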
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. 
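+ // On the wire each item is a group: 0x0B (field 1, start group), 0x10 and a + // varint type_id (field 2), 0x1A and the length-prefixed message (field 3), + // then 0x0C (field 1, end group).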
+ if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) 
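+ // Growing to at least double the current length amortizes the copy cost, + // keeping repeated appends linear in the total bytes written.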
+} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..5525def6a5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. 
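+ // Semantics follow proto merging: scalars overwrite when the source is + // non-zero, repeated fields append, and message fields merge recursively.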
+ merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) 
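+ // Keep dst non-nil when src was set but empty, so presence survives merge.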
+ if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..acee2fc529 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2053 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when all required fields are scanned
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int32]Extension), or invalidField if it does not exist
+ extensionRanges []ExtensionRange // extension ranges for this message
+ isMessageSet bool // this message is a message set
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
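+//
+// Illustrative note (an editor's sketch, not part of the upstream file): each
+// of these shares the unmarshaler signature
+// func(b []byte, f pointer, w int) ([]byte, error) and returns the bytes left
+// over after the field it consumed. As a worked example of the two decoding
+// tricks they rely on: the varint encoding of 300 is [0xAC 0x02], so
+// decodeVarint reads 0xAC (low 7 bits 0x2C, continuation bit set) then 0x02,
+// giving x = 0x2C | 0x02<<7 = 300 and n = 2. For sint32/sint64 fields the
+// varint is additionally zigzag-decoded: int64(x>>1) ^ int64(x)<<63>>63 maps
+// the varints 0, 1, 2, 3 to the signed values 0, -1, 1, -2, so a sint64
+// varint of 3 decodes to -2.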
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf03..1aaee725b4 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -355,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -372,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). 
pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f28..bb55a3af27 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < 
n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -644,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index 8f7cb4d274..0000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,673 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . "github.com/golang/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation with double quotes - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenation with single quotes - { - in: "count:42 name: 'My name is '\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenations with mixed quotes - { - in: "count:42 name: 'My name is '\n\"elsewhere\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - 
}, - }, - { - in: "count:42 name: \"My name is \"\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. - { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated field with list notation - { - in: `count:42 pet: ["horsey", "bunny"]`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} 
others{} others:<> others:{}`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Others: []*OtherMessage{
-				{},
-				{},
-				{},
-				{},
-			},
-		},
-	},
-
-	// Missing colon for inner message
-	{
-		in: `count:42 inner < host: "cauchy.syd" >`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host: String("cauchy.syd"),
-			},
-		},
-	},
-
-	// Missing colon for string field
-	{
-		in:  `name "Dave"`,
-		err: `line 1.5: expected ':', found "\"Dave\""`,
-	},
-
-	// Missing colon for int32 field
-	{
-		in:  `count 42`,
-		err: `line 1.6: expected ':', found "42"`,
-	},
-
-	// Missing required field
-	{
-		in:  `name: "Pawel"`,
-		err: `proto: required field "testdata.MyMessage.count" not set`,
-		out: &MyMessage{
-			Name: String("Pawel"),
-		},
-	},
-
-	// Missing required field in a required submessage
-	{
-		in:  `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
-		err: `proto: required field "testdata.InnerMessage.host" not set`,
-		out: &MyMessage{
-			Count:          Int32(42),
-			WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
-		},
-	},
-
-	// Repeated non-repeated field
-	{
-		in:  `name: "Rob" name: "Russ"`,
-		err: `line 1.12: non-repeated field "name" was repeated`,
-	},
-
-	// Group
-	{
-		in: `count: 17 SomeGroup { group_field: 12 }`,
-		out: &MyMessage{
-			Count: Int32(17),
-			Somegroup: &MyMessage_SomeGroup{
-				GroupField: Int32(12),
-			},
-		},
-	},
-
-	// Semicolon between fields
-	{
-		in: `count:3;name:"Calvin"`,
-		out: &MyMessage{
-			Count: Int32(3),
-			Name:  String("Calvin"),
-		},
-	},
-	// Comma between fields
-	{
-		in: `count:4,name:"Ezekiel"`,
-		out: &MyMessage{
-			Count: Int32(4),
-			Name:  String("Ezekiel"),
-		},
-	},
-
-	// Boolean false
-	{
-		in: `count:42 inner { host: "example.com" connected: false }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(false),
-			},
-		},
-	},
-	// Boolean true
-	{
-		in: `count:42 inner { host: "example.com" connected: true }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(true),
-			},
-		},
-	},
-	// Boolean 0
-	{
-		in: `count:42 inner { host: "example.com" connected: 0 }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(false),
-			},
-		},
-	},
-	// Boolean 1
-	{
-		in: `count:42 inner { host: "example.com" connected: 1 }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(true),
-			},
-		},
-	},
-	// Boolean f
-	{
-		in: `count:42 inner { host: "example.com" connected: f }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(false),
-			},
-		},
-	},
-	// Boolean t
-	{
-		in: `count:42 inner { host: "example.com" connected: t }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(true),
-			},
-		},
-	},
-	// Boolean False
-	{
-		in: `count:42 inner { host: "example.com" connected: False }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(false),
-			},
-		},
-	},
-	// Boolean True
-	{
-		in: `count:42 inner { host: "example.com" connected: True }`,
-		out: &MyMessage{
-			Count: Int32(42),
-			Inner: &InnerMessage{
-				Host:      String("example.com"),
-				Connected: Bool(true),
-			},
-		},
-	},
-
-	// Extension
-	buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
-	buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, 
world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) {
-	pb := new(RepeatedEnum)
-	if err := UnmarshalText("color: RED", pb); err != nil {
-		t.Fatal(err)
-	}
-	exp := &RepeatedEnum{
-		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
-	}
-	if !Equal(pb, exp) {
-		t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
-	}
-}
-
-func TestProto3TextParsing(t *testing.T) {
-	m := new(proto3pb.Message)
-	const in = `name: "Wallace" true_scotsman: true`
-	want := &proto3pb.Message{
-		Name:         "Wallace",
-		TrueScotsman: true,
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-func TestMapParsing(t *testing.T) {
-	m := new(MessageWithMap)
-	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
-		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
-		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
-		`msg_mapping:<value:<f: 5.0>>` + // omitted key
-		`msg_mapping:<key:1>` + // omitted value
-		`byte_mapping:<key:true value:"so be it">` +
-		`byte_mapping:<>` // omitted key and value
-	want := &MessageWithMap{
-		NameMapping: map[int32]string{
-			1:    "Beatles",
-			1234: "Feist",
-		},
-		MsgMapping: map[int64]*FloatingPoint{
-			-4: {F: Float64(2.0)},
-			-2: {F: Float64(4.0)},
-			0:  {F: Float64(5.0)},
-			1:  nil,
-		},
-		ByteMapping: map[bool][]byte{
-			false: nil,
-			true:  []byte("so be it"),
-		},
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-func TestOneofParsing(t *testing.T) {
-	const in = `name:"Shrek"`
-	m := new(Communique)
-	want := &Communique{Union: &Communique_Name{"Shrek"}}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-
-	const inOverwrite = `name:"Shrek" number:42`
-	m = new(Communique)
-	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
-	if err := UnmarshalText(inOverwrite, m); err == nil {
-		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
-	} else if err.Error() != testErr {
-		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
-			err.Error(), testErr)
-	}
-
-}
-
-var benchInput string
-
-func init() {
-	benchInput = "count: 4\n"
-	for i := 0; i < 1000; i++ {
-		benchInput += "pet: \"fido\"\n"
-	}
-
-	// Check it is valid input.
-	pb := new(MyMessage)
-	err := UnmarshalText(benchInput, pb)
-	if err != nil {
-		panic("Bad benchmark input: " + err.Error())
-	}
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
-	pb := new(MyMessage)
-	for i := 0; i < b.N; i++ {
-		UnmarshalText(benchInput, pb)
-	}
-	b.SetBytes(int64(len(benchInput)))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
deleted file mode 100644
index 3eabacac8f..0000000000
--- a/vendor/github.com/golang/protobuf/proto/text_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
-	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
-	proto.SetRawExtension(msg, 202, b)
-
-	return msg
-}
-
-const text = `count: 42
-name: "Dave"
-quote: "\"I didn't want to go.\""
-pet: "bunny"
-pet: "kitty"
-pet: "horsey"
-inner: <
-  host: "footrest.syd"
-  port: 7001
-  connected: true
->
-others: <
-  key: 3735928559
-  value: "\001A\007\014"
->
-others: <
-  weight: 6.022
-  inner: <
-    host: "lesha.mtv"
-    port: 8002
-  >
->
-bikeshed: BLUE
-SomeGroup {
-  group_field: 8
-}
-/* 2 unknown bytes */
-13: 4
-[testdata.Ext.more]: <
-  data: "Big gobs for big rats"
->
-[testdata.greeting]: "adg"
-[testdata.greeting]: "easy"
-[testdata.greeting]: "cow"
-/* 13 unknown bytes */
-201: "\t3G skiing"
-/* 3 unknown bytes */
-202: 19
-`
-
-func TestMarshalText(t *testing.T) {
-	buf := new(bytes.Buffer)
-	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
-		t.Fatalf("proto.MarshalText: %v", err)
-	}
-	s := buf.String()
-	if s != text {
-		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
-	}
-}
-
-func TestMarshalTextCustomMessage(t *testing.T) {
-	buf := new(bytes.Buffer)
-	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
-		t.Fatalf("proto.MarshalText: %v", err)
-	}
-	s := buf.String()
-	if s != "custom" {
-		t.Errorf("Got %q, expected %q", s, "custom")
-	}
-}
-func TestMarshalTextNil(t *testing.T) {
-	want := ""
-	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
-	for i, test := range tests {
-		buf := new(bytes.Buffer)
-		if err := proto.MarshalText(buf, test); err != nil {
-			t.Fatal(err)
-		}
-		if got := buf.String(); got != want {
-			t.Errorf("%d: got %q want %q", i, got, want)
-		}
-	}
-}
-
-func TestMarshalTextUnknownEnum(t *testing.T) {
-	// The Color enum only specifies values 0-2.
-	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
-	got := m.String()
-	const want = `bikeshed:3 `
-	if got != want {
-		t.Errorf("\n got %q\nwant %q", got, want)
-	}
-}
-
-func TestTextOneof(t *testing.T) {
-	tests := []struct {
-		m    proto.Message
-		want string
-	}{
-		// zero message
-		{&pb.Communique{}, ``},
-		// scalar field
-		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
-		// message field
-		{&pb.Communique{Union: &pb.Communique_Msg{
-			&pb.Strings{StringField: proto.String("why hello!")},
-		}}, `msg:<string_field:"why hello!" >`},
-		// bad oneof (should not panic)
-		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
-	}
-	for _, test := range tests {
-		got := strings.TrimSpace(test.m.String())
-		if got != test.want {
-			t.Errorf("\n got %s\nwant %s", got, test.want)
-		}
-	}
-}
-
-func BenchmarkMarshalTextBuffered(b *testing.B) {
-	buf := new(bytes.Buffer)
-	m := newTestMessage()
-	for i := 0; i < b.N; i++ {
-		buf.Reset()
-		proto.MarshalText(buf, m)
-	}
-}
-
-func BenchmarkMarshalTextUnbuffered(b *testing.B) {
-	w := ioutil.Discard
-	m := newTestMessage()
-	for i := 0; i < b.N; i++ {
-		proto.MarshalText(w, m)
-	}
-}
-
-func compact(src string) string {
-	// s/[ \n]+/ /g; s/ $//;
-	dst := make([]byte, len(src))
-	space, comment := false, false
-	j := 0
-	for i := 0; i < len(src); i++ {
-		if strings.HasPrefix(src[i:], "/*") {
-			comment = true
-			i++
-			continue
-		}
-		if comment && strings.HasPrefix(src[i:], "*/") {
-			comment = false
-			i++
-			continue
-		}
-		if comment {
-			continue
-		}
-		c := src[i]
-		if c == ' ' || c == '\n' {
-			space = true
-			continue
-		}
-		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
-			space = false
-		}
-		if c == '{' {
-			space = false
-		}
-		if space {
-			dst[j] = ' '
-			j++
-			space = false
-		}
-		dst[j] = c
-		j++
-	}
-	if 
space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. - &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
-		if err != outOfSpace {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
-		}
-		s := buf.b.String()
-		x := text[:buf.limit]
-		if s != x {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
-		}
-	}
-}
-
-func TestFloats(t *testing.T) {
-	tests := []struct {
-		f    float64
-		want string
-	}{
-		{0, "0"},
-		{4.7, "4.7"},
-		{math.Inf(1), "inf"},
-		{math.Inf(-1), "-inf"},
-		{math.NaN(), "nan"},
-	}
-	for _, test := range tests {
-		msg := &pb.FloatingPoint{F: &test.f}
-		got := strings.TrimSpace(msg.String())
-		want := `f:` + test.want
-		if got != want {
-			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
-		}
-	}
-}
-
-func TestRepeatedNilText(t *testing.T) {
-	m := &pb.MessageList{
-		Message: []*pb.MessageList_Message{
-			nil,
-			&pb.MessageList_Message{
-				Name: proto.String("Horse"),
-			},
-			nil,
-		},
-	}
-	want := `Message <nil>
-Message {
-  name: "Horse"
-}
-Message <nil>
-`
-	if s := proto.MarshalTextString(m); s != want {
-		t.Errorf(" got: %s\nwant: %s", s, want)
-	}
-}
-
-func TestProto3Text(t *testing.T) {
-	tests := []struct {
-		m    proto.Message
-		want string
-	}{
-		// zero message
-		{&proto3pb.Message{}, ``},
-		// zero message except for an empty byte slice
-		{&proto3pb.Message{Data: []byte{}}, ``},
-		// trivial case
-		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
-		// empty map
-		{&pb.MessageWithMap{}, ``},
-		// non-empty map; map format is the same as a repeated struct,
-		// and they are sorted by key (numerically for numeric keys).
-		{
-			&pb.MessageWithMap{NameMapping: map[int32]string{
-				-1:      "Negatory",
-				7:       "Lucky",
-				1234:    "Feist",
-				6345789: "Otis",
-			}},
-			`name_mapping:<key:-1 value:"Negatory" > ` +
-				`name_mapping:<key:7 value:"Lucky" > ` +
-				`name_mapping:<key:1234 value:"Feist" > ` +
-				`name_mapping:<key:6345789 value:"Otis" >`,
-		},
-		// map with nil value; not well-defined, but we shouldn't crash
-		{
-			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
-			`msg_mapping:<key:7 >`,
-		},
-	}
-	for _, test := range tests {
-		got := strings.TrimSpace(test.m.String())
-		if got != test.want {
-			t.Errorf("\n got %s\nwant %s", got, test.want)
-		}
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000000..1ded05bbe7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2887 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+	// negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
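// Editor's aside, not part of the vendored file: a minimal sketch of how a
// FileDescriptorSet written by `protoc --descriptor_set_out=out.pb` could be
// decoded with this package. The path "out.pb" and the error handling are
// placeholders.
//
//	raw, err := ioutil.ReadFile("out.pb")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fds := new(descriptor.FileDescriptorSet)
//	if err := proto.Unmarshal(raw, fds); err != nil {
//		log.Fatal(err)
//	}
//	for _, fd := range fds.GetFile() {
//		fmt.Printf("%s (package %q)\n", fd.GetName(), fd.GetPackage())
//	}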
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{0} +} + +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{1} +} + +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
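// Editor's aside, assuming only the accessors generated below: message types
// nest arbitrarily deep through NestedType, so listing every message in a
// FileDescriptorProto is a short recursion. Names here are illustrative.
//
//	func walk(prefix string, m *descriptor.DescriptorProto) {
//		name := prefix + "." + m.GetName()
//		fmt.Println(name)
//		for _, nested := range m.GetNestedType() {
//			walk(name, nested)
//		}
//	}
//
//	// for _, m := range fd.GetMessageType() { walk(fd.GetPackage(), m) }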
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2} +} + +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if 
m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
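// Editor's aside, not part of the generated file: for message types the
// reserved range is half-open (Start inclusive, End exclusive), so a sketch
// of a containment check looks like:
//
//	func isReservedTag(m *descriptor.DescriptorProto, tag int32) bool {
//		for _, r := range m.GetReservedRange() {
//			if tag >= r.GetStart() && tag < r.GetEnd() {
//				return true
//			}
//		}
//		return false
//	}
//
// Enum reserved ranges (EnumReservedRange, further below) differ in that
// their End is inclusive.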
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4} +} + +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
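+// A minimal construction sketch (illustrative only; the oneof name is made
+// up, and proto.String is the pointer helper from the proto package this
+// file imports):
+//
+//	oneof := &OneofDescriptorProto{Name: proto.String("result")}
+//	name := oneof.GetName() // "result"; the getter is nil-safe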
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{5} +} + +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6} +} + +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
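+// A small sketch of what the inclusive end implies (illustrative; isReserved
+// is a hypothetical helper, not part of this file):
+//
+//	func isReserved(r *EnumDescriptorProto_EnumReservedRange, v int32) bool {
+//		return v >= r.GetStart() && v <= r.GetEnd() // end is inclusive here
+//	}
+//
+// The equivalent check against DescriptorProto.ReservedRange would use
+// v < r.GetEnd(), since that range excludes its end.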
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
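+// A minimal construction sketch (illustrative; the name and number are made
+// up):
+//
+//	v := &EnumValueDescriptorProto{
+//		Name:   proto.String("COLOR_RED"),
+//		Number: proto.Int32(1),
+//	}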
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{7} +} + +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
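+// A minimal construction sketch (illustrative; all names are hypothetical):
+//
+//	svc := &ServiceDescriptorProto{
+//		Name: proto.String("Greeter"),
+//		Method: []*MethodDescriptorProto{{
+//			Name:       proto.String("SayHello"),
+//			InputType:  proto.String(".mypkg.HelloRequest"),
+//			OutputType: proto.String(".mypkg.HelloReply"),
+//		}},
+//	}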
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{8} +} + +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
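+ // For example (hypothetical names), a method declared as
+ //   rpc Get(GetRequest) returns (GetResponse);
+ // in package mypkg would typically carry the fully-qualified values
+ // ".mypkg.GetRequest" and ".mypkg.GetResponse" here.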
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{9} +} + +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set to true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // This option does nothing.
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+ // If set to true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ //   - The basename of the package import path, if provided.
+ //   - Otherwise, the package statement in the .proto file, if present.
+ //   - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; in the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the Objective-C class prefix which is prepended to all Objective-C
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+ // Sets the PHP class prefix which is prepended to all PHP generated classes
+ // from this .proto. Default is empty.
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+ // Use this option to change the namespace of PHP generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+ // Use this option to change the namespace of PHP generated metadata classes.
+ // Default is empty. When this option is empty, the proto file name will be used
+ // for determining the namespace.
+ PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+ // Use this option to change the package of Ruby generated classes. Default
+ // is empty. When this option is not set, the package name will be used for
+ // determining the Ruby package.
+ RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+ // The parser stores options it doesn't recognize here.
+ // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // map field.
+ //
+ // For map fields:
+ //     map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ //     message MapFieldEntry {
+ //         option map_entry = true;
+ //         optional KeyType key = 1;
+ //         optional ValueType value = 2;
+ //     }
+ //     repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e5baabe45344a177, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+ return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field.
The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+ // code to use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; in the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
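+ // For example (illustrative): an option value of 42 is stored in
+ // positive_int_value, -42 in negative_int_value, 4.2 in double_value,
+ // "4.2" in string_value, a bare identifier such as SPEED in
+ // identifier_value, and a message literal like { foo: 1 } in
+ // aggregate_value.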
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18} +} + +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
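+// A small sketch reconstructing that dotted form (illustrative; joinName is
+// a hypothetical helper and assumes the standard strings package):
+//
+//	func joinName(parts []*UninterpretedOption_NamePart) string {
+//		segs := make([]string, 0, len(parts))
+//		for _, p := range parts {
+//			seg := p.GetNamePart()
+//			if p.GetIsExtension() {
+//				seg = "(" + seg + ")" // extension segments are parenthesized
+//			}
+//			segs = append(segs, seg)
+//		}
+//		return strings.Join(segs, ".")
+//	}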
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19} +} + +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition occurs. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20} +} + +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified object. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } + +var fileDescriptor_e5baabe45344a177 = []byte{ + // 2589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, + 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, + 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, + 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, + 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, + 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, + 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, + 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, + 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, + 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, + 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, + 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, + 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, + 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, + 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, + 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, + 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 
0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, + 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, + 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, + 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, + 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, + 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, + 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, + 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, + 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, + 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, + 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, + 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, + 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, + 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, + 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, + 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, + 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, + 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, + 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, + 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, + 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, + 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, + 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, + 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, + 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, + 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, + 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, + 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, + 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, + 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, + 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, + 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, + 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, + 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, + 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, + 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, + 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 
0xd6, 0x9d, 0x5b, + 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, + 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, + 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, + 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, + 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, + 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, + 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, + 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, + 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, + 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, + 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, + 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, + 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, + 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, + 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, + 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, + 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, + 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, + 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, + 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, + 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, + 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, + 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, + 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, + 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, + 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, + 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, + 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, + 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, + 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, + 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, + 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, + 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, + 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, + 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, + 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, + 0xbe, 
0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, + 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, + 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, + 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, + 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, + 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, + 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, + 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, + 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, + 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, + 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, + 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, + 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, + 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, + 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, + 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, + 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, + 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, + 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, + 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, + 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, + 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, + 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, + 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, + 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, + 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, + 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, + 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, + 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, + 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, + 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, + 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, + 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, + 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, + 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, + 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, + 0x39, 0x37, 0xa5, 0x1f, 0xd2, 
0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, + 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, + 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, + 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, + 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, + 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, + 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, + 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, + 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, + 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, + 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, + 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, + 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, + 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, + 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, + 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, + 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, + 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, + 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, + 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, + 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, + 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, + 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, + 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, + 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, + 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, + 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, + 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, + 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, + 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, + 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, + 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, + 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, + 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto 
b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 0000000000..ed08fcbc54 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,883 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
+ repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. 
If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. 
These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. 
+ enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. 
+ optional string ruby_package = 45; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. 
+ STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicitly setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as a JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript value. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. 
+ optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. 
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS = 1; // implies idempotent
+    IDEMPOTENT = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option. Each string represents a segment in
+  // a dot-separated name. is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition. This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it). This is used whenever a set of elements are
+  //   logically enclosed in a single code segment. For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path. This happens when a single
+  //   logical declaration is spread out across multiple places. The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span. For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant. For example, a "group" defines
+  //   both a type and a field in a single declaration. Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index. They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.
+    // For example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name. If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency. Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out. For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
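An editor's aside (the comment's worked examples resume immediately below): the `[ 4, 3, 2, 7, 1 ]` path walk described above can be reproduced by hand against a `FileDescriptorProto`. A sketch under the assumption that the indexed elements exist:

```go
package sketch

import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"

// fieldNameAt resolves the example path [4, 3, 2, 7, 1] from the comment
// above by hand; a general resolver would switch on each field number.
func fieldNameAt(file *descriptor.FileDescriptorProto) string {
	msg := file.MessageType[3] // 4 = FileDescriptorProto.message_type, index 3
	fld := msg.Field[7]        // 2 = DescriptorProto.field, index 7
	return fld.GetName()       // 1 = FieldDescriptorProto.name
}
```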
+ // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 0000000000..6f4a902b5b --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2806 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 3 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. + Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +var plugins []Plugin + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name method are common to messages and enums. +type common struct { + file *FileDescriptor // File this object comes from. +} + +// GoImportPath is the import path of the Go package containing the type. 
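An editor's aside before the `GoImportPath` accessor that the preceding comment documents: the `Plugin` interface and `RegisterPlugin` above are the extension seam for second-order generators (the vendored grpc plugin registers itself this way). A hypothetical, no-frills plugin sketched against the interface exactly as declared:

```go
package helloplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

// helloPlugin is a hypothetical second-order plugin: it only appends a
// comment to every file it is asked to generate.
type helloPlugin struct{ gen *generator.Generator }

func (p *helloPlugin) Name() string                { return "hello" }
func (p *helloPlugin) Init(g *generator.Generator) { p.gen = g }
func (p *helloPlugin) Generate(file *generator.FileDescriptor) {
	p.gen.P("// generated with the hello plugin")
}
func (p *helloPlugin) GenerateImports(file *generator.FileDescriptor) {}

// Registering in init makes the plugin available to any binary that
// links this package in, protoc-gen-go style.
func init() { generator.RegisterPlugin(new(helloPlugin)) }
```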
+func (c *common) GoImportPath() GoImportPath {
+	return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	importPath  GoImportPath  // Import path of this file's package.
+	packageName GoPackageName // Name of this file's Go package.
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+	h := sha256.Sum256([]byte(d.GetName()))
+	return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+	opt := d.GetOptions().GetGoPackage()
+	if opt == "" {
+		return "", "", false
+	}
+	// A semicolon-delimited suffix delimits the import path and package name.
+	sc := strings.Index(opt, ";")
+	if sc >= 0 {
+		return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+	}
+	// The presence of a slash implies there's an import path.
+	slash := strings.LastIndex(opt, "/")
+	if slash >= 0 {
+		return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+	}
+	return "", cleanPackageName(opt), true
+}
+
+// goFileName returns the output name for the generated Go file.
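An editor's aside before `goFileName`, which the preceding comment documents: the three `go_package` shapes that `goPackageOption` distinguishes are easiest to see restated as a standalone helper. Illustrative only; the real method additionally runs `cleanPackageName` over the package half:

```go
package sketch

import "strings"

// splitGoPackage restates goPackageOption's parsing rules outside the
// package, purely for illustration (hypothetical helper).
func splitGoPackage(opt string) (impPath, pkg string, ok bool) {
	if opt == "" {
		return "", "", false // no go_package option at all
	}
	if sc := strings.Index(opt, ";"); sc >= 0 {
		return opt[:sc], opt[sc+1:], true // "example.com/x;bar" -> ("example.com/x", "bar")
	}
	if slash := strings.LastIndex(opt, "/"); slash >= 0 {
		return opt, opt[slash+1:], true // "example.com/x/foo" -> ("example.com/x/foo", "foo")
	}
	return "", opt, true // "foo" -> ("", "foo")
}
```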
+func (d *FileDescriptor) goFileName(pathType pathType) string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + if pathType == pathTypeSourceRelative { + return name + } + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(string(impPath), name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, filename string, pkg GoPackageName) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + oneofTypes []string +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + g.P("// ", ms.sym, " from public import ", filename) + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) + } +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + s := es.name + g.P("// ", s, " from public import ", filename) + g.P("type ", s, " = ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + GoImportPath() GoImportPath + TypeName() []string + File() *FileDescriptor +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. 
+ file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + addedImports map[GoImportPath]bool // Additional imports to emit. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + pathType pathType // How to generate output filenames. + writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } + case "plugins": + pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { + return "" + } + return string(g.GoPackageName(importPath)) + "." +} + +// GoPackageName returns the name used for a package. 
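An editor's aside before `GoPackageName`, which the preceding comment documents: `CommandLineParameters` above is fed the comma-separated string that protoc passes before the colon of `--go_out=<params>:<dir>`. A sketch with hypothetical parameter values showing where each entry lands:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/generator"
)

func main() {
	g := generator.New()
	// Key=value pairs land in Param; M-prefixed entries additionally
	// populate the .proto-file-to-import-path map.
	g.CommandLineParameters("plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo")
	fmt.Println(g.Param["paths"])         // "source_relative"
	fmt.Println(g.ImportMap["foo.proto"]) // "example.com/foo"
}
```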
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} + +// AddImport adds a package to the generated file's import section. +// It returns the name used for the package. +func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { + g.addedImports[importPath] = true + return g.GoPackageName(importPath) +} + +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} + +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + globalPackageNames[name] = true + return string(name) +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +var isGoPredeclaredIdentifier = map[string]bool{ + "append": true, + "bool": true, + "byte": true, + "cap": true, + "close": true, + "complex": true, + "complex128": true, + "complex64": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "float32": true, + "float64": true, + "imag": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "rune": true, + "string": true, + "true": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, +} + +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword or predeclared identifier: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. +func (g *Generator) defaultGoPackage() GoPackageName { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + return cleanPackageName(p) +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. 
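An editor's aside before `SetPackageNames`, which the preceding comment documents: both `GoPackageName` and `RegisterUniquePackageName` above resolve name collisions with the same numbered-suffix loop, restated here as an illustrative standalone helper (so two imported packages both named `descriptor` become `descriptor` and `descriptor1`):

```go
package sketch

import "strconv"

// uniquePackageName restates the collision loop used by GoPackageName and
// RegisterUniquePackageName: append 1, 2, ... until the name is unused.
func uniquePackageName(base string, taken map[string]bool) string {
	name := base
	for i := 1; taken[name]; i++ {
		name = base + strconv.Itoa(i)
	}
	taken[name] = true
	return name
}
```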
+func (g *Generator) SetPackageNames() { + g.outputImportPath = g.genFiles[0].importPath + + defaultPackageNames := make(map[GoImportPath]GoPackageName) + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p + } + } + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) + } + } + + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) + } + } + + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. + g.Pkg = map[string]string{ + "fmt": "fmt", + "math": "math", + "proto": "proto", + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } + for _, f := range g.Request.ProtoFile { + fd := &FileDescriptor{ + FileDescriptorProto: f, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. 
+ fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *FileDescriptor) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. 
+// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). +func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } + g.WriteString(g.indent) + for _, v := range str { + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } + default: + g.printAtom(v) + } + } + g.WriteByte('\n') +} + +// addInitf stores the given statement to be printed inside the file's init function. +// The statement is given as a format specifier and arguments. 
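An editor's aside before `addInitf`, which the preceding comment documents: plugins drive the `P`/`In`/`Out` trio above to emit indented Go line by line. A sketch of typical usage, assuming `g` is the generator a plugin saved in its `Init` (as in the plugin sketch earlier) and output is currently being written:

```go
package helloplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

// emitHello emits a tiny function, one line per P call, with In/Out
// managing the indentation prefix.
func emitHello(g *generator.Generator) {
	g.P("func Hello() string {")
	g.In()
	g.P(`return "hello"`)
	g.Out()
	g.P("}")
}
```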
+func (g *Generator) addInitf(stmt string, a ...interface{}) { + g.init = append(g.init, fmt.Sprintf(stmt, a...)) +} + +// In Indents the output one tab stop. +func (g *Generator) In() { g.indent += "\t" } + +// Out unindents the output one tab stop. +func (g *Generator) Out() { + if len(g.indent) > 0 { + g.indent = g.indent[1:] + } +} + +// GenerateAllFiles generates the output for all the files we're outputting. +func (g *Generator) GenerateAllFiles() { + // Initialize the plugins + for _, p := range plugins { + p.Init(g) + } + // Generate the output. The generator runs for every file, even the files + // that we don't generate output for, so that we can collate the full list + // of exported symbols to support public imports. + genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) + for _, file := range g.genFiles { + genFileMap[file] = true + } + for _, file := range g.allFiles { + g.Reset() + g.annotations = nil + g.writeOutput = genFileMap[file] + g.generate(file) + if !g.writeOutput { + continue + } + fname := file.goFileName(g.pathType) + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(fname), + Content: proto.String(g.String()), + }) + if g.annotateCode { + // Store the generated code annotations in text, as the protoc plugin protocol requires that + // strings contain valid UTF-8. + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName(g.pathType) + ".meta"), + Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), + }) + } + } +} + +// Run all the plugins associated with the file. +func (g *Generator) runPlugins(file *FileDescriptor) { + for _, p := range plugins { + p.Generate(file) + } +} + +// Fill the response protocol buffer with the generated output for all the files we're +// supposed to generate. +func (g *Generator) generate(file *FileDescriptor) { + g.file = file + g.usedPackages = make(map[GoImportPath]bool) + g.packageNames = make(map[GoImportPath]GoPackageName) + g.usedPackageNames = make(map[GoPackageName]bool) + g.addedImports = make(map[GoImportPath]bool) + for name := range globalPackageNames { + g.usedPackageNames[name] = true + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the proto package it is being compiled against.") + g.P("// A compilation error at this line likely means your copy of the") + g.P("// proto package needs to be updated.") + g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") + g.P() + + for _, td := range g.file.imp { + g.generateImported(td) + } + for _, enum := range g.file.enum { + g.generateEnum(enum) + } + for _, desc := range g.file.desc { + // Don't generate virtual messages for maps. + if desc.GetOptions().GetMapEntry() { + continue + } + g.generateMessage(desc) + } + for _, ext := range g.file.ext { + g.generateExtension(ext) + } + g.generateInitFunction() + g.generateFileDescriptor(file) + + // Run the plugins before the imports so we know which imports are necessary. + g.runPlugins(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + remAnno := g.annotations + g.Buffer = new(bytes.Buffer) + g.annotations = nil + g.generateHeader() + g.generateImports() + if !g.writeOutput { + return + } + // Adjust the offsets for annotations displaced by the header and imports. 
+ for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } + g.Write(rem.Bytes()) + + // Reformat generated code and patch annotation locations. + fset := token.NewFileSet() + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. + // This should never happen in practice, but it can while changing generated code, + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + ast.SortImports(fset, fileAST) + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + if g.file.GetOptions().GetDeprecated() { + g.P("// ", g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", g.file.Name) + } + g.P() + g.PrintComments(strconv.Itoa(packagePath)) + g.P() + g.P("package ", g.file.packageName) + g.P() +} + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } + if c, ok := g.makeComments(path); ok { + g.P(c) + return true + } + return false +} + +// makeComments generates the comment string for the field, no "\n" at the end +func (g *Generator) makeComments(path string) (string, bool) { + loc, ok := g.file.comments[path] + if !ok { + return "", false + } + w := new(bytes.Buffer) + nl := "" + for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { + fmt.Fprintf(w, "%s//%s", nl, line) + nl = "\n" + } + return w.String(), true +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. 
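An editor's aside before `weak`, which the preceding comment documents: the reformatting step at the heart of `generate` above is a plain `go/parser` parse, `ast.SortImports`, `go/printer` print round trip, shown standalone here on a throwaway snippet:

```go
package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"log"
)

func main() {
	// Deliberately mis-indented source with unsorted imports.
	src := []byte("package p\n\nimport (\n\"os\"\n\"fmt\"\n)\n\nvar _ = fmt.Sprint(os.Args)\n")
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	ast.SortImports(fset, file)
	var buf bytes.Buffer
	cfg := &printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}
	if err := cfg.Fprint(&buf, fset, file); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // imports sorted, code re-indented
}
```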
+func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + imports := make(map[GoImportPath]GoPackageName) + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + importPath := fd.importPath + // Do not import our own package. + if importPath == g.file.importPath { + continue + } + // Do not import weak imports. + if g.weak(int32(i)) { + continue + } + // Do not import a package twice. + if _, ok := imports[importPath]; ok { + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + packageName := g.GoPackageName(importPath) + if _, ok := g.usedPackages[importPath]; !ok { + packageName = "_" + } + imports[importPath] = packageName + } + for importPath := range g.addedImports { + imports[importPath] = g.GoPackageName(importPath) + } + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + g.P("import (") + g.P(g.Pkg["fmt"] + ` "fmt"`) + g.P(g.Pkg["math"] + ` "math"`) + g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") + for importPath, packageName := range imports { + g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) + } + g.P(")") + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + df := id.o.File() + filename := *df.Name + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) + } + g.usedPackages[df.importPath] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } + g.PrintComments(enum.path) + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + for i, e := range enum.Value { + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } + + name := ccPrefix + *e.Name + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.P(")") + g.P() + g.P("var ", ccTypeName, "_name = map[int32]string{") + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.P("}") + g.P() + g.P("var ", ccTypeName, "_value = map[string]int32{") + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.P("}") + g.P() + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + g.P() + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + g.P() + } + + g.generateEnumRegistration(enum) +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. 
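An editor's aside before `goTag` itself: for a hypothetical proto2 field `optional int32 page_size = 2 [default = 50];`, the tag format described in the comment above combines into a struct tag of roughly this shape (the `json=` segment appears when protoc's json_name differs from the declared name):

```go
package sketch

// example shows the kind of tag goTag produces for the hypothetical
// field above: wire type, number, label, name, json name, then default.
type example struct {
	PageSize *int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,def=50"`
}
```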
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { + defaultValue = fmt.Sprint(float32(f)) + } + } + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { + defaultValue = fmt.Sprint(f) + } + } + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.GoPackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. 
+ name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + name += ",proto3" + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. +func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if _, ok := g.typeNameToObject[t]; !ok { + return + } + importPath := g.ObjectNamed(t).GoImportPath() + if importPath == g.outputImportPath { + // Don't record use of objects in our 
package. + return + } + g.AddImport(importPath) + g.usedPackages[importPath] = true +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. Any change to this set is a potential incompatible +// API change because it changes generated field names. +var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", +} + +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. +var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + +// getterDefault finds the default value for the field to return from a getter, +// regardless of whether it's a built-in default or one set explicitly in the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" +func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { + if isRepeated(field) { + return "nil" + } + if def := field.GetDefaultValue(); def != "" { + defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + return defaultConstant + } + return "append([]byte(nil), " + defaultConstant + "...)" + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + return "false" + case descriptor.FieldDescriptorProto_TYPE_STRING: + return `""` + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: + return "nil" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + return "nil" + } + if len(enum.Value) == 0 { + return "0 // empty enum" + } + first := enum.Value[0].GetName() + return g.DefaultPackageName(obj) + enum.prefix() + first + default: + return "0" + } +} + +// defaultConstantName builds the name of the default constant from the message +// type name and the untouched field name, e.g. "Default_MessageType_FieldName" +func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { + return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) +} + +// The different types of fields in a message and how to actually print them. +// Most of the logic for generateMessage is in the methods of these types. +// +// Note that the content of the field is irrelevant, a simpleField can contain +// anything from a scalar to a group (which is just a message). +// +// Extension fields (and message sets) are however handled separately. +// +// simpleField - a field that is neither weak nor oneof, possibly repeated +// oneofField - field containing list of subfields: +// - oneofSubField - a field within the oneof + +// msgCtx contains the context for the generator functions. +type msgCtx struct { + goName string // Go struct name of the message, e.g.
MessageName + message *Descriptor // The descriptor for the message +} + +// fieldCommon contains data common to all types of fields. +type fieldCommon struct { + goName string // Go name of field, e.g. "FieldName" or "Descriptor_" + protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" + getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" + goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" + tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` + fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" +} + +// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". +func (f *fieldCommon) getProtoName() string { + return f.protoName +} + +// getGoType returns the go type of the field as a string, e.g. "*int32". +func (f *fieldCommon) getGoType() string { + return f.goType +} + +// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. +type simpleField struct { + fieldCommon + protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" + protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 + deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." + getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" + protoDef string // Default value as defined in the proto file, e.g. "yoshi" or "5" + comment string // The full comment for the field, e.g. "// Useful information" +} + +// decl prints the declaration of the field in the struct (if any). +func (f *simpleField) decl(g *Generator, mc *msgCtx) { + g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) +} + +// getter prints the getter for the field. +func (f *simpleField) getter(g *Generator, mc *msgCtx) { + star := "" + tname := f.goType + if needsStar(f.protoType) && tname[0] == '*' { + tname = tname[1:] + star = "*" + } + if f.deprecated != "" { + g.P(f.deprecated) + } + g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") + if f.getterDef == "nil" { // Simpler getter + g.P("if m != nil {") + g.P("return m." + f.goName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + return + } + if mc.message.proto3() { + g.P("if m != nil {") + } else { + g.P("if m != nil && m." + f.goName + " != nil {") + } + g.P("return " + star + "m." + f.goName) + g.P("}") + g.P("return ", f.getterDef) + g.P("}") + g.P() +} + +// setter prints the setter method of the field. +func (f *simpleField) setter(g *Generator, mc *msgCtx) { + // No setter for regular fields yet +} + +// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5". +func (f *simpleField) getProtoDef() string { + return f.protoDef +} + +// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". +func (f *simpleField) getProtoTypeName() string { + return f.protoTypeName +} + +// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. +func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { + return f.protoType +} + +// oneofSubFields are kept in a slice held by each oneofField.
They do not appear in the top-level slice of fields for the message. +type oneofSubField struct { + fieldCommon + protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" + protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 + oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" + fieldNumber int // Actual field number, as defined in proto, e.g. 12 + getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" + protoDef string // Default value as defined in the proto file, e.g. "yoshi" or "5" + deprecated string // Deprecation comment, if any. +} + +// typedNil prints a nil cast to the pointer to this field. +// - for XXX_OneofWrappers +func (f *oneofSubField) typedNil(g *Generator) { + g.P("(*", f.oneofTypeName, ")(nil),") +} + +// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5". +func (f *oneofSubField) getProtoDef() string { + return f.protoDef +} + +// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". +func (f *oneofSubField) getProtoTypeName() string { + return f.protoTypeName +} + +// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. +func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { + return f.protoType +} + +// oneofField represents the oneof at the top level. +// The alternative fields within the oneof are represented by oneofSubField. +type oneofField struct { + fieldCommon + subFields []*oneofSubField // All the possible oneof fields + comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" +} + +// decl prints the declaration of the field in the struct (if any). +func (f *oneofField) decl(g *Generator, mc *msgCtx) { + comment := f.comment + for _, sf := range f.subFields { + comment += "//\t*" + sf.oneofTypeName + "\n" + } + g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") +} + +// getter for a oneof field prints the discriminator interface and wrapper types for the oneof, +// as well as the getters for all the sub fields.
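+// As a rough sketch (all names hypothetical), for a oneof "avatar" in a message "Profile" +// with a string alternative "image_url", the code printed below amounts to: +// type isProfile_Avatar interface{ isProfile_Avatar() } +// type Profile_ImageUrl struct{ ImageUrl string `...` } +// func (*Profile_ImageUrl) isProfile_Avatar() {} +// func (m *Profile) GetAvatar() isProfile_Avatar { ... } +// func (m *Profile) GetImageUrl() string { ... }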
+func (f *oneofField) getter(g *Generator, mc *msgCtx) { + // The discriminator type + g.P("type ", f.goType, " interface {") + g.P(f.goType, "()") + g.P("}") + g.P() + // The subField types, fulfilling the discriminator type contract + for _, sf := range f.subFields { + g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") + g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") + g.P("}") + g.P() + } + for _, sf := range f.subFields { + g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") + g.P() + } + // Getter for the oneof field + g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") + g.P("if m != nil { return m.", f.goName, " }") + g.P("return nil") + g.P("}") + g.P() + // Getters for each oneof + for _, sf := range f.subFields { + if sf.deprecated != "" { + g.P(sf.deprecated) + } + g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") + g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") + g.P("return x.", sf.goName) + g.P("}") + g.P("return ", sf.getterDef) + g.P("}") + g.P() + } +} + +// setter prints the setter method of the field. +func (f *oneofField) setter(g *Generator, mc *msgCtx) { + // No setters for oneof yet +} + +// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). +type topLevelField interface { + decl(g *Generator, mc *msgCtx) // print declaration within the struct + getter(g *Generator, mc *msgCtx) // print getter + setter(g *Generator, mc *msgCtx) // print setter if applicable +} + +// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). +type defField interface { + getProtoDef() string // default value explicitly stated in the proto file, e.g. "yoshi" or "5" + getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor" + getGoType() string // go type of the field as a string, e.g. "*int32" + getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" + getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 +} + +// generateDefaultConstants adds constants for default values if needed, which is only if the default value is +// explicit in the proto. +func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { + // Collect fields that can have defaults + dFields := []defField{} + for _, pf := range topLevelFields { + if f, ok := pf.(*oneofField); ok { + for _, osf := range f.subFields { + dFields = append(dFields, osf) + } + continue + } + dFields = append(dFields, pf.(defField)) + } + for _, df := range dFields { + def := df.getProtoDef() + if def == "" { + continue + } + fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) + typename := df.getGoType() + if typename[0] == '*' { + typename = typename[1:] + } + kind := "const " + switch { + case typename == "bool": + case typename == "string": + def = strconv.Quote(def) + case typename == "[]byte": + def = "[]byte(" + strconv.Quote(unescape(def)) + ")" + kind = "var " + case def == "inf", def == "-inf", def == "nan": + // These names are known to, and defined by, the protocol language.
+ switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT: + if f, err := strconv.ParseFloat(def, 32); err == nil { + def = fmt.Sprint(float32(f)) + } + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if f, err := strconv.ParseFloat(def, 64); err == nil { + def = fmt.Sprint(f) + } + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. + obj := g.ObjectNamed(df.getProtoTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + def = g.DefaultPackageName(obj) + enum.prefix() + def + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() +} + +// generateInternalStructFields just adds the XXX_ fields to the message struct. +func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { + g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals + if len(mc.message.ExtensionRange) > 0 { + messageset := "" + if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { + messageset = "protobuf_messageset:\"1\" " + } + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") + } + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + g.P("XXX_sizecache\tint32 `json:\"-\"`") + +} + +// generateOneofFuncs adds all the utility functions for oneof, including marshaling, unmarshaling, and sizing. +func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { + ofields := []*oneofField{} + for _, f := range topLevelFields { + if o, ok := f.(*oneofField); ok { + ofields = append(ofields, o) + } + } + if len(ofields) == 0 { + return + } + + // OneofFuncs + g.P("// XXX_OneofWrappers is for the internal use of the proto package.") + g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {") + g.P("return []interface{}{") + for _, of := range ofields { + for _, sf := range of.subFields { + sf.typedNil(g) + } + } + g.P("}") + g.P("}") + g.P() +} + +// generateMessageStruct adds the actual struct with its members (but not methods) to the output. +func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { + comments := g.PrintComments(mc.message.path) + + // Guarantee deprecation comments appear after user-provided comments. + if mc.message.GetOptions().GetDeprecated() { + if comments { + // Convention: Separate deprecation comments from original + // comments with an empty line. + g.P("//") + } + g.P(deprecationComment) + } + + g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") + for _, pf := range topLevelFields { + pf.decl(g, mc) + } + g.generateInternalStructFields(mc, topLevelFields) + g.P("}") +} + +// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
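+// For orientation, given a hypothetical proto2 field `optional string name = 1;`, +// the getter printed by simpleField.getter above comes out roughly as: +// func (m *MessageName) GetName() string { +// if m != nil && m.Name != nil { +// return *m.Name +// } +// return "" +// } +// (MessageName is a placeholder; proto3 messages drop the per-field nil check.)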
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { + for _, pf := range topLevelFields { + pf.getter(g, mc) + } +} + +// generateSetters add setters for all fields, including oneofs and weak fields when applicable. +func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { + for _, pf := range topLevelFields { + pf.setter(g, mc) + } +} + +// generateCommonMethods adds methods to the message that are not on a per field basis. +func (g *Generator) generateCommonMethods(mc *msgCtx) { + // Reset, String and ProtoMessage methods. + g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") + g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + g.P("func (*", mc.goName, ") ProtoMessage() {}") + var indexes []string + for m := mc.message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + g.P() + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { + g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) + g.P() + } + + // Extension support methods + if len(mc.message.ExtensionRange) > 0 { + g.P() + g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") + for _, r := range mc.message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{Start: ", r.Start, ", End: ", end, "},") + } + g.P("}") + g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.P("return extRange_", mc.goName) + g.P("}") + g.P() + } + + // TODO: It does not scale to keep adding another method for every + // operation on protos that we want to switch over to using the + // table-driven approach. Instead, we should only add a single method + // that allows getting access to the *InternalMessageInfo struct and then + // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. + + // Wrapper for table-driven marshaling and unmarshaling. + g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") + g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") + g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") + g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message + g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") + g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") + g.P("}") + + g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") + g.P() +} + +// Generate the type, methods and default constant definitions for this Descriptor. 
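+// The per-message output is assembled in a fixed order: the struct itself, the common +// methods, the default constants, the getters, the setters, and finally the oneof +// wrapper functions (each via the generate* helpers above).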
+func (g *Generator) generateMessage(message *Descriptor) { + topLevelFields := []topLevelField{} + oFields := make(map[int32]*oneofField) + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + goTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. + allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later + + // Build a structure more suitable for generating the text in one pass + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + oneof := field.OneofIndex != nil + if oneof && oFields[*field.OneofIndex] == nil { + odp := message.OneofDecl[int(*field.OneofIndex)] + base := CamelCase(odp.GetName()) + fname := allocNames(base)[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) + c, ok := g.makeComments(oneofFullPath) + if ok { + c += "\n//\n" + } + c += "// Types that are valid to be assigned to " + fname + ":\n" + // Generate the rest of this comment later, + // when we've computed any disambiguation. + + dname := "is" + goTypeName + "_" + fname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + of := oneofField{ + fieldCommon: fieldCommon{ + goName: fname, + getterName: "Get"+fname, + goType: dname, + tags: tag, + protoName: odp.GetName(), + fullPath: oneofFullPath, + }, + comment: c, + } + topLevelFields = append(topLevelFields, &of) + oFields[*field.OneofIndex] = &of + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. + keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. 
+ keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldDeprecated := "" + if field.GetOptions().GetDeprecated() { + fieldDeprecated = deprecationComment + } + + dvalue := g.getterDefault(field, goTypeName) + if oneof { + tname := goTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofField := oFields[*field.OneofIndex] + tag := "protobuf:" + g.goTag(message, field, wiretype) + sf := oneofSubField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), + }, + protoTypeName: field.GetTypeName(), + fieldNumber: int(*field.Number), + protoType: *field.Type, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + oneofTypeName: tname, + deprecated: fieldDeprecated, + } + oneofField.subFields = append(oneofField.subFields, &sf) + g.RecordTypeUse(field.GetTypeName()) + continue + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + c, ok := g.makeComments(fieldFullPath) + if ok { + c += "\n" + } + rf := simpleField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fieldFullPath, + }, + protoTypeName: field.GetTypeName(), + protoType: *field.Type, + deprecated: fieldDeprecated, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + comment: c, + } + var pf topLevelField = &rf + + topLevelFields = append(topLevelFields, pf) + g.RecordTypeUse(field.GetTypeName()) + } + + mc := &msgCtx{ + goName: goTypeName, + message: message, + } + + g.generateMessageStruct(mc, topLevelFields) + g.P() + g.generateCommonMethods(mc) + g.P() + g.generateDefaultConstants(mc, topLevelFields) + g.P() + g.generateGetters(mc, topLevelFields) + g.P() + g.generateSetters(mc, topLevelFields) + g.P() + g.generateOneofFuncs(mc, topLevelFields) + g.P() + + var oneofTypes []string + for _, f := range topLevelFields { + if of, ok := f.(*oneofField); ok { + for _, osf := range of.subFields { + oneofTypes = append(oneofTypes, osf.oneofTypeName) + } + } + } + + opts := message.Options + ms := &messageSymbol{ + sym: goTypeName, + hasExtensions: len(message.ExtensionRange) > 0, + isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), + oneofTypes: oneofTypes, + } + g.file.addExport(message, ms) + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." 
+ fullName + } + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) + // Register types for native map types. + for _, k := range mapFieldKeys(mapFieldTypes) { + fullName := strings.TrimPrefix(*k.TypeName, ".") + g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) + } + +} + +type byTypeName []*descriptor.FieldDescriptorProto + +func (a byTypeName) Len() int { return len(a) } +func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } + +// mapFieldKeys returns the keys of m in a consistent order. +func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { + keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(byTypeName(keys)) + return keys +} + +var escapeChars = [256]byte{ + 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', +} + +// unescape reverses the "C" escaping that protoc does for default values of bytes fields. +// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape +// sequences are conveyed, unmodified, into the decoded result. +func unescape(s string) string { + // NB: Sadly, we can't use strconv.Unquote because protoc will escape both + // single and double quotes, but strconv.Unquote only allows one or the + // other (based on actual surrounding quotes of its input argument). + + var out []byte + for len(s) > 0 { + // regular character, or too short to be valid escape + if s[0] != '\\' || len(s) < 2 { + out = append(out, s[0]) + s = s[1:] + } else if c := escapeChars[s[1]]; c != 0 { + // escape sequence + out = append(out, c) + s = s[2:] + } else if s[1] == 'x' || s[1] == 'X' { + // hex escape, e.g. "\x80" + if len(s) < 4 { + // too short to be valid + out = append(out, s[:2]...) + s = s[2:] + continue + } + v, err := strconv.ParseUint(s[2:4], 16, 8) + if err != nil { + out = append(out, s[:4]...) + } else { + out = append(out, byte(v)) + } + s = s[4:] + } else if '0' <= s[1] && s[1] <= '7' { + // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" + // so consume up to 2 more bytes or up to end-of-string + n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag.
+ extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2.bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. + // + // TODO: This should be implemented in the text formatter rather than the generator. + // In addition, the situation for when to apply this special case is implemented + // differently in other languages: + // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 + if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) + + g.P("}") + g.P() + + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + if len(g.init) == 0 { + return + } + g.P("func init() {") + for _, l := range g.init { + g.P(l) + } + g.P("}") + g.init = nil +} + +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. + pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) + pb.SourceCodeInfo = nil + + b, err := proto.Marshal(pb) + if err != nil { + g.Fail(err.Error()) + } + + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + + v := file.VarName() + g.P() + g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + g.P("var ", v, " = []byte{") + g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." + } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? 
+func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 0000000000..a9b61036cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package remap handles tracking the locations of Go tokens in a source text +across a rewrite by the Go formatter. +*/ +package remap + +import ( + "fmt" + "go/scanner" + "go/token" +) + +// A Location represents a span of byte offsets in the source text. +type Location struct { + Pos, End int // End is exclusive +} + +// A Map represents a mapping between token locations in an input source text +// and locations in the corresponding output text. +type Map map[Location]Location + +// Find reports whether the specified span is recorded by m, and if so returns +// the new location it was mapped to. If the input span was not found, the +// returned location is the same as the input. +func (m Map) Find(pos, end int) (Location, bool) { + key := Location{ + Pos: pos, + End: end, + } + if loc, ok := m[key]; ok { + return loc, true + } + return key, false +} + +func (m Map) add(opos, oend, npos, nend int) { + m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} +} + +// Compute constructs a location mapping from input to output. An error is +// reported if any of the tokens of output cannot be mapped. +func Compute(input, output []byte) (Map, error) { + itok := tokenize(input) + otok := tokenize(output) + if len(itok) != len(otok) { + return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) + } + m := make(Map) + for i, ti := range itok { + to := otok[i] + if ti.Token != to.Token { + return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) + } + m.add(ti.pos, ti.end, to.pos, to.end) + } + return m, nil +} + +// tokinfo records the span and type of a source token.
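+// Its pos and end are 0-based byte offsets into the source text, while go/token +// positions are 1-based; hence the -1 adjustments in tokenize below.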
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 0000000000..61bfc10e02 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. 
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. 
+ // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden new file mode 100644 index 0000000000..8953d0ff82 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. 
+var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil && this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 0000000000..5b5574529e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. 
+ repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. 
+ // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index b2af97f4a9..70276e8f5c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f34601723d..78ee523349 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,20 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ package any -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -25,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -108,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // } // type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. 
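// Editorial usage sketch, assuming the ptypes helpers from
// github.com/golang/protobuf/ptypes: MarshalAny stamps the
// "type.googleapis.com/" prefix, and the rewritten Is (later in this patch)
// matches the fully qualified name after the last '/' instead of parsing the
// whole URL:
//
//	a, _ := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
//	// a.TypeUrl == "type.googleapis.com/google.protobuf.Duration"
//	if ptypes.Is(a, &durpb.Duration{}) {
//		d := &durpb.Duration{}
//		_ = ptypes.UnmarshalAny(a, d) // d.Seconds == 5
//	}
//
// (durpb aliases github.com/golang/protobuf/ptypes/duration.)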
However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -127,19 +121,47 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} + +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +181,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } -var fileDescriptor0 = []byte{ +var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index c748667623..4932942558 100644 --- 
a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -120,17 +120,18 @@ option objc_class_prefix = "GPB"; // } // message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -139,6 +140,10 @@ message Any { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go deleted file mode 100644 index ed675b489c..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/golang/protobuf/ptypes/any" -) - -func TestMarshalUnmarshal(t *testing.T) { - orig := &any.Any{Value: []byte("test")} - - packed, err := MarshalAny(orig) - if err != nil { - t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) - } - - unpacked := &any.Any{} - err = UnmarshalAny(packed, unpacked) - if err != nil || !proto.Equal(unpacked, orig) { - t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) - } -} - -func TestIs(t *testing.T) { - a, err := MarshalAny(&pb.FileDescriptorProto{}) - if err != nil { - t.Fatal(err) - } - if Is(a, &pb.DescriptorProto{}) { - t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") - } - if !Is(a, &pb.FileDescriptorProto{}) { - t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") - } -} - -func TestIsDifferentUrlPrefixes(t *testing.T) { - m := &pb.FileDescriptorProto{} - a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} - if !Is(a, m) { - t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) - } -} - -func TestUnmarshalDynamic(t *testing.T) { - want := &pb.FileDescriptorProto{Name: proto.String("foo")} - a, err := MarshalAny(want) - if err != nil { - t.Fatal(err) - } - var got DynamicAny - if err := UnmarshalAny(a, &got); err != nil { - t.Fatal(err) - } - if !proto.Equal(got.Message, want) { - t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) - } -} - -func TestEmpty(t *testing.T) { - want := &pb.FileDescriptorProto{} - a, err := MarshalAny(want) - if err != nil { - t.Fatal(err) - } - got, err := Empty(a) - if err != nil { - t.Fatal(err) - } - if !proto.Equal(got, want) { - t.Errorf("unequal empty message, got %q, want %q", got, want) - } - - // that's a valid type_url for a message which shouldn't be linked into this - // test binary. We want an error. 
- a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" - if _, err := Empty(a); err == nil { - t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 65cb0f8eb5..26d1ca2fb5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) + d += time.Duration(p.Nanos) * time.Nanosecond if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a098e..0d681ee21a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,20 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ package duration -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -25,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -91,21 +84,45 @@ type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
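// Editorial sketch (not generated code), assuming the ptypes helpers:
// converting between durpb.Duration and time.Duration. The explicit
// "* time.Nanosecond" added earlier in this patch does not change behavior
// (time.Nanosecond is 1); it only makes the units explicit.
//
//	pd := &durpb.Duration{Seconds: 1, Nanos: 500000000}
//	d, err := ptypes.Duration(pd)                         // 1.5s, err == nil
//	back := ptypes.DurationProto(1500 * time.Millisecond)
//	ok := proto.Equal(back, pd)                           // true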
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} + +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +142,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } -var fileDescriptor0 = []byte{ +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go deleted file mode 100644 index e00491a34f..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -import ( - "math" - "testing" - "time" - - "github.com/golang/protobuf/proto" - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - minGoSeconds = math.MinInt64 / int64(1e9) - maxGoSeconds = math.MaxInt64 / int64(1e9) -) - -var durationTests = []struct { - proto *durpb.Duration - isValid bool - inRange bool - dur time.Duration -}{ - // The zero duration. - {&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0}, - // Some ordinary non-zero durations. - {&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second}, - {&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second}, - {&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987}, - {&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)}, - // The largest duration representable in Go. - {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, - // The smallest duration representable in Go. - {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, - {nil, false, false, 0}, - {&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0}, - {&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0}, - {&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0}, - {&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0}, - // The largest valid duration. - {&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0}, - // The smallest valid duration. - {&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0}, - // The smallest invalid duration above the valid range. - {&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0}, - // The largest invalid duration below the valid range. - {&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0}, - // One nanosecond past the largest duration representable in Go. - {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, - // One nanosecond past the smallest duration representable in Go. - {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, - // One second past the largest duration representable in Go. - {&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, - // One second past the smallest duration representable in Go. 
- {&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, -} - -func TestValidateDuration(t *testing.T) { - for _, test := range durationTests { - err := validateDuration(test.proto) - gotValid := (err == nil) - if gotValid != test.isValid { - t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) - } - } -} - -func TestDuration(t *testing.T) { - for _, test := range durationTests { - got, err := Duration(test.proto) - gotOK := (err == nil) - wantOK := test.isValid && test.inRange - if gotOK != wantOK { - t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) - } - if err == nil && got != test.dur { - t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) - } - } -} - -func TestDurationProto(t *testing.T) { - for _, test := range durationTests { - if test.isValid && test.inRange { - got := DurationProto(test.dur) - if !proto.Equal(got, test.proto) { - t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100755 index b50a9414ac..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=(any duration empty struct timestamp wrappers) - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd "$base" - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir - -for file in ${PROTO_FILES[@]}; do - echo 1>&2 "* $file" - protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die - cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file -done - -echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 0000000000..33daa73dd2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} + +func (*Value) XXX_WellKnownType() string { return "Value" } + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} + +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 
0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 0000000000..7d7808e7fb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
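// Editorial sketch (not part of the vendored files): building the structpb
// types generated above. The JSON equivalent would be
// {"name":"nginx","ready":true,"replicas":3}.
//
//	import structpb "github.com/golang/protobuf/ptypes/struct"
//
//	s := &structpb.Struct{
//		Fields: map[string]*structpb.Value{
//			"name":     {Kind: &structpb.Value_StringValue{StringValue: "nginx"}},
//			"replicas": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
//			"ready":    {Kind: &structpb.Value_BoolValue{BoolValue: true}},
//		},
//	}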
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 47f10dbc2c..8da0df01ac 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
 func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
 	ts := &tspb.Timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
 	}
 	if err := validateTimestamp(ts); err != nil {
 		return nil, err
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index e23e4a25da..31cd846de9 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,20 +1,13 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: google/protobuf/timestamp.proto
 
-/*
-Package timestamp is a generated protocol buffer package.
- -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ package timestamp -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -25,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -90,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -101,27 +96,51 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
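// Editorial sketch, assuming the ptypes helpers patched earlier in this
// diff: round-tripping time.Time through the Timestamp message. With the
// TimestampProto change above, Nanos comes directly from t.Nanosecond().
//
//	ts, _ := ptypes.TimestampProto(time.Now()) // *tspb.Timestamp
//	t, _ := ptypes.Timestamp(ts)               // back to time.Time, in UTC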
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} + +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +160,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } -var fileDescriptor0 = []byte{ +var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index b7cbd17502..eafb3fa03a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -103,7 +103,9 @@ option objc_class_prefix = "GPB"; // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -114,8 +116,8 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go deleted file mode 100644 index 6e3c969b94..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -import ( - "math" - "testing" - "time" - - "github.com/golang/protobuf/proto" - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -var tests = []struct { - ts *tspb.Timestamp - valid bool - t time.Time -}{ - // The timestamp representing the Unix epoch date. - {&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)}, - // The smallest representable timestamp. - {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false, - time.Unix(math.MinInt64, math.MinInt32).UTC()}, - // The smallest representable timestamp with non-negative nanos. - {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()}, - // The earliest valid timestamp. - {&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)}, - //"0001-01-01T00:00:00Z"}, - // The largest representable timestamp. - {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false, - time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, - // The largest representable timestamp with nanos in range. 
- {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false, - time.Unix(math.MaxInt64, 1e9-1).UTC()}, - // The largest valid timestamp. - {&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true, - time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, - // The smallest invalid timestamp that is larger than the valid range. - {&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, - // A date before the epoch. - {&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)}, - // A date after the epoch. - {&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)}, - // A date after the epoch, in the middle of the day. - {&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true, - time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, -} - -func TestValidateTimestamp(t *testing.T) { - for _, s := range tests { - got := validateTimestamp(s.ts) - if (got == nil) != s.valid { - t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) - } - } -} - -func TestTimestamp(t *testing.T) { - for _, s := range tests { - got, err := Timestamp(s.ts) - if (err == nil) != s.valid { - t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) - } else if s.valid && got != s.t { - t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) - } - } - // Special case: a nil Timestamp is an error, but returns the 0 Unix time. - got, err := Timestamp(nil) - want := time.Unix(0, 0).UTC() - if got != want { - t.Errorf("Timestamp(nil) = %v, want %v", got, want) - } - if err == nil { - t.Errorf("Timestamp(nil) error = nil, expected error") - } -} - -func TestTimestampProto(t *testing.T) { - for _, s := range tests { - got, err := TimestampProto(s.t) - if (err == nil) != s.valid { - t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) - } else if s.valid && !proto.Equal(got, s.ts) { - t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) - } - } - // No corresponding special case here: no time.Time results in a nil Timestamp. -} - -func TestTimestampString(t *testing.T) { - for _, test := range []struct { - ts *tspb.Timestamp - want string - }{ - // Not much testing needed because presumably time.Format is - // well-tested. - {&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"}, - {&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, - } { - got := TimestampString(test.ts) - if got != test.want { - t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) - } - } -} - -func utcDate(year, month, day int) time.Time { - return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) -} - -func TestTimestampNow(t *testing.T) { - // Bracket the expected time. - before := time.Now() - ts := TimestampNow() - after := time.Now() - - tm, err := Timestamp(ts) - if err != nil { - t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err) - } - if tm.Before(before) || tm.After(after) { - t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm) - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 0000000000..add19a1adb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,461 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/wrappers.proto + +package wrappers + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} + +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} + +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. 
+type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} + +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} + +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} + +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} + +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} + +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} + +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} + +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 
0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 0000000000..01947639ac --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. 
+// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index eb74b1d39c..6ff062f9bb 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -103,12 +103,16 @@ func (f *FreeList) newNode() (n *node) { return } -func (f *FreeList) freeNode(n *node) { +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { f.mu.Lock() if len(f.freelist) < cap(f.freelist) { f.freelist = append(f.freelist, n) + out = true } f.mu.Unlock() + return } // ItemIterator allows callers of Ascend* to iterate in-order over portions of @@ -496,13 +500,14 @@ const ( // thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a // "greaterThan" or "lessThan" queries. func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok bool + var ok, found bool + var index int switch dir { case ascend: - for i := 0; i < len(n.items); i++ { - if start != nil && n.items[i].Less(start) { - continue - } + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { if len(n.children) > 0 { if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { return hit, false @@ -526,7 +531,15 @@ func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit b } } case descend: - for i := len(n.items) - 1; i >= 0; i-- { + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { if start != nil && !n.items[i].Less(start) { if !includeStart || hit || start.Less(n.items[i]) { continue @@ -635,13 +648,30 @@ func (c *copyOnWriteContext) newNode() (n *node) { return } -func (c *copyOnWriteContext) freeNode(n *node) { +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). 
+func (c *copyOnWriteContext) freeNode(n *node) freeType { if n.cow == c { // clear to allow GC n.items.truncate(0) n.children.truncate(0) n.cow = nil - c.freelist.freeNode(n) + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned } } @@ -812,6 +842,45 @@ func (t *BTree) Len() int { return t.length } +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + // Int implements the Item interface for integers. type Int int diff --git a/vendor/github.com/google/btree/btree_test.go b/vendor/github.com/google/btree/btree_test.go deleted file mode 100644 index 5da9d8b694..0000000000 --- a/vendor/github.com/google/btree/btree_test.go +++ /dev/null @@ -1,689 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package btree - -import ( - "flag" - "fmt" - "math/rand" - "reflect" - "sort" - "sync" - "testing" - "time" -) - -func init() { - seed := time.Now().Unix() - fmt.Println(seed) - rand.Seed(seed) -} - -// perm returns a random permutation of n Int items in the range [0, n). -func perm(n int) (out []Item) { - for _, v := range rand.Perm(n) { - out = append(out, Int(v)) - } - return -} - -// rang returns an ordered list of Int items in the range [0, n). 
-func rang(n int) (out []Item) { - for i := 0; i < n; i++ { - out = append(out, Int(i)) - } - return -} - -// all extracts all items from a tree in order as a slice. -func all(t *BTree) (out []Item) { - t.Ascend(func(a Item) bool { - out = append(out, a) - return true - }) - return -} - -// rangerev returns a reversed ordered list of Int items in the range [0, n). -func rangrev(n int) (out []Item) { - for i := n - 1; i >= 0; i-- { - out = append(out, Int(i)) - } - return -} - -// allrev extracts all items from a tree in reverse order as a slice. -func allrev(t *BTree) (out []Item) { - t.Descend(func(a Item) bool { - out = append(out, a) - return true - }) - return -} - -var btreeDegree = flag.Int("degree", 32, "B-Tree degree") - -func TestBTree(t *testing.T) { - tr := New(*btreeDegree) - const treeSize = 10000 - for i := 0; i < 10; i++ { - if min := tr.Min(); min != nil { - t.Fatalf("empty min, got %+v", min) - } - if max := tr.Max(); max != nil { - t.Fatalf("empty max, got %+v", max) - } - for _, item := range perm(treeSize) { - if x := tr.ReplaceOrInsert(item); x != nil { - t.Fatal("insert found item", item) - } - } - for _, item := range perm(treeSize) { - if x := tr.ReplaceOrInsert(item); x == nil { - t.Fatal("insert didn't find item", item) - } - } - if min, want := tr.Min(), Item(Int(0)); min != want { - t.Fatalf("min: want %+v, got %+v", want, min) - } - if max, want := tr.Max(), Item(Int(treeSize-1)); max != want { - t.Fatalf("max: want %+v, got %+v", want, max) - } - got := all(tr) - want := rang(treeSize) - if !reflect.DeepEqual(got, want) { - t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) - } - - gotrev := allrev(tr) - wantrev := rangrev(treeSize) - if !reflect.DeepEqual(gotrev, wantrev) { - t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) - } - - for _, item := range perm(treeSize) { - if x := tr.Delete(item); x == nil { - t.Fatalf("didn't find %v", item) - } - } - if got = all(tr); len(got) > 0 { - t.Fatalf("some left!: %v", got) - } - } -} - -func ExampleBTree() { - tr := New(*btreeDegree) - for i := Int(0); i < 10; i++ { - tr.ReplaceOrInsert(i) - } - fmt.Println("len: ", tr.Len()) - fmt.Println("get3: ", tr.Get(Int(3))) - fmt.Println("get100: ", tr.Get(Int(100))) - fmt.Println("del4: ", tr.Delete(Int(4))) - fmt.Println("del100: ", tr.Delete(Int(100))) - fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5))) - fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100))) - fmt.Println("min: ", tr.Min()) - fmt.Println("delmin: ", tr.DeleteMin()) - fmt.Println("max: ", tr.Max()) - fmt.Println("delmax: ", tr.DeleteMax()) - fmt.Println("len: ", tr.Len()) - // Output: - // len: 10 - // get3: 3 - // get100: - // del4: 4 - // del100: - // replace5: 5 - // replace100: - // min: 0 - // delmin: 0 - // max: 100 - // delmax: 100 - // len: 8 -} - -func TestDeleteMin(t *testing.T) { - tr := New(3) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() { - got = append(got, v) - } - if want := rang(100); !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } -} - -func TestDeleteMax(t *testing.T) { - tr := New(3) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() { - got = append(got, v) - } - // Reverse our list. 
- for i := 0; i < len(got)/2; i++ { - got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] - } - if want := rang(100); !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } -} - -func TestAscendRange(t *testing.T) { - tr := New(2) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.AscendRange(Int(40), Int(60), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.AscendRange(Int(40), Int(60), func(a Item) bool { - if a.(Int) > 50 { - return false - } - got = append(got, a) - return true - }) - if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } -} - -func TestDescendRange(t *testing.T) { - tr := New(2) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendRange(Int(60), Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendRange(Int(60), Int(40), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) - } -} -func TestAscendLessThan(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.AscendLessThan(Int(60), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rang(100)[:60]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.AscendLessThan(Int(60), func(a Item) bool { - if a.(Int) > 50 { - return false - } - got = append(got, a) - return true - }) - if want := rang(100)[:51]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } -} - -func TestDescendLessOrEqual(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendLessOrEqual(Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendLessOrEqual(Int(60), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) - } -} -func TestAscendGreaterOrEqual(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rang(100)[40:]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { - if a.(Int) > 50 { - return false - } - got = append(got, a) - return true - }) - if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { - t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) - } -} - -func TestDescendGreaterThan(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - 
tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendGreaterThan(Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendGreaterThan(Int(40), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) - } -} - -const benchmarkTreeSize = 10000 - -func BenchmarkInsert(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - b.StartTimer() - i := 0 - for i < b.N { - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - i++ - if i >= b.N { - return - } - } - } -} - -func BenchmarkDeleteInsert(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - b.StartTimer() - for i := 0; i < b.N; i++ { - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - -func BenchmarkDeleteInsertCloneOnce(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - tr = tr.Clone() - b.StartTimer() - for i := 0; i < b.N; i++ { - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - -func BenchmarkDeleteInsertCloneEachTime(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - b.StartTimer() - for i := 0; i < b.N; i++ { - tr = tr.Clone() - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - -func BenchmarkDelete(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - removeP := perm(benchmarkTreeSize) - b.StartTimer() - i := 0 - for i < b.N { - b.StopTimer() - tr := New(*btreeDegree) - for _, v := range insertP { - tr.ReplaceOrInsert(v) - } - b.StartTimer() - for _, item := range removeP { - tr.Delete(item) - i++ - if i >= b.N { - return - } - } - if tr.Len() > 0 { - panic(tr.Len()) - } - } -} - -func BenchmarkGet(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - removeP := perm(benchmarkTreeSize) - b.StartTimer() - i := 0 - for i < b.N { - b.StopTimer() - tr := New(*btreeDegree) - for _, v := range insertP { - tr.ReplaceOrInsert(v) - } - b.StartTimer() - for _, item := range removeP { - tr.Get(item) - i++ - if i >= b.N { - return - } - } - } -} - -func BenchmarkGetCloneEachTime(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - removeP := perm(benchmarkTreeSize) - b.StartTimer() - i := 0 - for i < b.N { - b.StopTimer() - tr := New(*btreeDegree) - for _, v := range insertP { - tr.ReplaceOrInsert(v) - } - b.StartTimer() - for _, item := range removeP { - tr = tr.Clone() - tr.Get(item) - i++ - if i >= b.N { - return - } - } - } -} - -type byInts []Item - -func (a byInts) Len() int { - return len(a) -} - -func (a byInts) Less(i, j int) bool { - return a[i].(Int) < a[j].(Int) -} - -func (a byInts) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func BenchmarkAscend(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - 
for i := 0; i < b.N; i++ { - j := 0 - tr.Ascend(func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - return true - }) - } -} - -func BenchmarkDescend(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 1 - tr.Descend(func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - return true - }) - } -} -func BenchmarkAscendRange(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := 100 - tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - return true - }) - if j != len(arr)-100 { - b.Fatalf("expected: %v, got %v", len(arr)-100, j) - } - } -} - -func BenchmarkDescendRange(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 100 - tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - return true - }) - if j != 100 { - b.Fatalf("expected: %v, got %v", len(arr)-100, j) - } - } -} -func BenchmarkAscendGreaterOrEqual(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := 100 - k := 0 - tr.AscendGreaterOrEqual(Int(100), func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - k++ - return true - }) - if j != len(arr) { - b.Fatalf("expected: %v, got %v", len(arr), j) - } - if k != len(arr)-100 { - b.Fatalf("expected: %v, got %v", len(arr)-100, k) - } - } -} -func BenchmarkDescendLessOrEqual(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 100 - k := len(arr) - tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - k-- - return true - }) - if j != -1 { - b.Fatalf("expected: %v, got %v", -1, j) - } - if k != 99 { - b.Fatalf("expected: %v, got %v", 99, k) - } - } -} - -const cloneTestSize = 10000 - -func cloneTest(t *testing.T, b *BTree, start int, p []Item, wg *sync.WaitGroup, trees *[]*BTree) { - t.Logf("Starting new clone at %v", start) - *trees = append(*trees, b) - for i := start; i < cloneTestSize; i++ { - b.ReplaceOrInsert(p[i]) - if i%(cloneTestSize/5) == 0 { - wg.Add(1) - go cloneTest(t, b.Clone(), i+1, p, wg, trees) - } - } - wg.Done() -} - -func TestCloneConcurrentOperations(t *testing.T) { - b := New(*btreeDegree) - trees := []*BTree{} - p := perm(cloneTestSize) - var wg sync.WaitGroup - wg.Add(1) - go cloneTest(t, b, 0, p, &wg, &trees) - wg.Wait() - want := 
rang(cloneTestSize) - t.Logf("Starting equality checks on %d trees", len(trees)) - for i, tree := range trees { - if !reflect.DeepEqual(want, all(tree)) { - t.Errorf("tree %v mismatch", i) - } - } - t.Log("Removing half from first half") - toRemove := rang(cloneTestSize)[cloneTestSize/2:] - for i := 0; i < len(trees)/2; i++ { - tree := trees[i] - wg.Add(1) - go func() { - for _, item := range toRemove { - tree.Delete(item) - } - wg.Done() - }() - } - wg.Wait() - t.Log("Checking all values again") - for i, tree := range trees { - var wantpart []Item - if i < len(trees)/2 { - wantpart = want[:cloneTestSize/2] - } else { - wantpart = want - } - if got := all(tree); !reflect.DeepEqual(wantpart, got) { - t.Errorf("tree %v mismatch, want %v got %v", i, len(want), len(got)) - } - } -} diff --git a/vendor/github.com/google/gofuzz/example_test.go b/vendor/github.com/google/gofuzz/example_test.go deleted file mode 100644 index 792707a3a1..0000000000 --- a/vendor/github.com/google/gofuzz/example_test.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz_test - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/google/gofuzz" -) - -func ExampleSimple() { - type MyType struct { - A string - B string - C int - D struct { - E float64 - } - } - - f := fuzz.New() - object := MyType{} - - uniqueObjects := map[MyType]int{} - - for i := 0; i < 1000; i++ { - f.Fuzz(&object) - uniqueObjects[object]++ - } - fmt.Printf("Got %v unique objects.\n", len(uniqueObjects)) - // Output: - // Got 1000 unique objects. -} - -func ExampleCustom() { - type MyType struct { - A int - B string - } - - counter := 0 - f := fuzz.New().Funcs( - func(i *int, c fuzz.Continue) { - *i = counter - counter++ - }, - ) - object := MyType{} - - uniqueObjects := map[MyType]int{} - - for i := 0; i < 100; i++ { - f.Fuzz(&object) - if object.A != i { - fmt.Printf("Unexpected value: %#v\n", object) - } - uniqueObjects[object]++ - } - fmt.Printf("Got %v unique objects.\n", len(uniqueObjects)) - // Output: - // Got 100 unique objects. 
-} - -func ExampleComplex() { - type OtherType struct { - A string - B string - } - type MyType struct { - Pointer *OtherType - Map map[string]OtherType - PointerMap *map[string]OtherType - Slice []OtherType - SlicePointer []*OtherType - PointerSlicePointer *[]*OtherType - } - - f := fuzz.New().RandSource(rand.NewSource(0)).NilChance(0).NumElements(1, 1).Funcs( - func(o *OtherType, c fuzz.Continue) { - o.A = "Foo" - o.B = "Bar" - }, - func(op **OtherType, c fuzz.Continue) { - *op = &OtherType{"A", "B"} - }, - func(m map[string]OtherType, c fuzz.Continue) { - m["Works Because"] = OtherType{ - "Fuzzer", - "Preallocated", - } - }, - ) - object := MyType{} - f.Fuzz(&object) - bytes, err := json.MarshalIndent(&object, "", " ") - if err != nil { - fmt.Printf("error: %v\n", err) - } - fmt.Printf("%s\n", string(bytes)) - // Output: - // { - // "Pointer": { - // "A": "A", - // "B": "B" - // }, - // "Map": { - // "Works Because": { - // "A": "Fuzzer", - // "B": "Preallocated" - // } - // }, - // "PointerMap": { - // "Works Because": { - // "A": "Fuzzer", - // "B": "Preallocated" - // } - // }, - // "Slice": [ - // { - // "A": "Foo", - // "B": "Bar" - // } - // ], - // "SlicePointer": [ - // { - // "A": "A", - // "B": "B" - // } - // ], - // "PointerSlicePointer": [ - // { - // "A": "A", - // "B": "B" - // } - // ] - // } -} - -func ExampleMap() { - f := fuzz.New().NilChance(0).NumElements(1, 1) - var myMap map[struct{ A, B, C int }]string - f.Fuzz(&myMap) - fmt.Printf("myMap has %v element(s).\n", len(myMap)) - // Output: - // myMap has 1 element(s). -} - -func ExampleSingle() { - f := fuzz.New() - var i int - f.Fuzz(&i) - - // Technically, we'd expect this to fail one out of 2 billion attempts... - fmt.Printf("(i == 0) == %v", i == 0) - // Output: - // (i == 0) == false -} - -func ExampleEnum() { - type MyEnum string - const ( - A MyEnum = "A" - B MyEnum = "B" - ) - type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string - } - - f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - // Note c's embedded Rand allows for direct use. - // We could also use c.RandBool() here. - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, - ) - - for i := 0; i < 100; i++ { - var myObject MyInfo - f.Fuzz(&myObject) - switch myObject.Type { - case A: - if myObject.AInfo == nil { - fmt.Println("AInfo should have been set!") - } - if myObject.BInfo != nil { - fmt.Println("BInfo should NOT have been set!") - } - case B: - if myObject.BInfo == nil { - fmt.Println("BInfo should have been set!") - } - if myObject.AInfo != nil { - fmt.Println("AInfo should NOT have been set!") - } - default: - fmt.Println("Invalid enum value!") - } - } - // Output: -} diff --git a/vendor/github.com/google/gofuzz/fuzz_test.go b/vendor/github.com/google/gofuzz/fuzz_test.go deleted file mode 100644 index 4059ea6feb..0000000000 --- a/vendor/github.com/google/gofuzz/fuzz_test.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "reflect" - "testing" - "time" -) - -func TestFuzz_basic(t *testing.T) { - obj := &struct { - I int - I8 int8 - I16 int16 - I32 int32 - I64 int64 - U uint - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - Uptr uintptr - S string - B bool - T time.Time - }{} - - failed := map[string]int{} - for i := 0; i < 10; i++ { - New().Fuzz(obj) - - if n, v := "i", obj.I; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "i8", obj.I8; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "i16", obj.I16; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "i32", obj.I32; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "i64", obj.I64; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "u", obj.U; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "u8", obj.U8; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "u16", obj.U16; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "u32", obj.U32; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "u64", obj.U64; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "uptr", obj.Uptr; v == 0 { - failed[n] = failed[n] + 1 - } - if n, v := "s", obj.S; v == "" { - failed[n] = failed[n] + 1 - } - if n, v := "b", obj.B; v == false { - failed[n] = failed[n] + 1 - } - if n, v := "t", obj.T; v.IsZero() { - failed[n] = failed[n] + 1 - } - } - checkFailed(t, failed) -} - -func checkFailed(t *testing.T, failed map[string]int) { - for k, v := range failed { - if v > 8 { - t.Errorf("%v seems to not be getting set, was zero value %v times", k, v) - } - } -} - -func TestFuzz_structptr(t *testing.T) { - obj := &struct { - A *struct { - S string - } - }{} - - f := New().NilChance(.5) - failed := map[string]int{} - for i := 0; i < 10; i++ { - f.Fuzz(obj) - - if n, v := "a not nil", obj.A; v == nil { - failed[n] = failed[n] + 1 - } - if n, v := "a nil", obj.A; v != nil { - failed[n] = failed[n] + 1 - } - if n, v := "as", obj.A; v == nil || v.S == "" { - failed[n] = failed[n] + 1 - } - } - checkFailed(t, failed) -} - -// tryFuzz tries fuzzing up to 20 times. Fail if check() never passes, report the highest -// stage it ever got to. 
-func tryFuzz(t *testing.T, f *Fuzzer, obj interface{}, check func() (stage int, passed bool)) { - maxStage := 0 - for i := 0; i < 20; i++ { - f.Fuzz(obj) - stage, passed := check() - if stage > maxStage { - maxStage = stage - } - if passed { - return - } - } - t.Errorf("Only ever got to stage %v", maxStage) -} - -func TestFuzz_structmap(t *testing.T) { - obj := &struct { - A map[struct { - S string - }]struct { - S2 string - } - B map[string]string - }{} - - tryFuzz(t, New(), obj, func() (int, bool) { - if obj.A == nil { - return 1, false - } - if len(obj.A) == 0 { - return 2, false - } - for k, v := range obj.A { - if k.S == "" { - return 3, false - } - if v.S2 == "" { - return 4, false - } - } - - if obj.B == nil { - return 5, false - } - if len(obj.B) == 0 { - return 6, false - } - for k, v := range obj.B { - if k == "" { - return 7, false - } - if v == "" { - return 8, false - } - } - return 9, true - }) -} - -func TestFuzz_structslice(t *testing.T) { - obj := &struct { - A []struct { - S string - } - B []string - }{} - - tryFuzz(t, New(), obj, func() (int, bool) { - if obj.A == nil { - return 1, false - } - if len(obj.A) == 0 { - return 2, false - } - for _, v := range obj.A { - if v.S == "" { - return 3, false - } - } - - if obj.B == nil { - return 4, false - } - if len(obj.B) == 0 { - return 5, false - } - for _, v := range obj.B { - if v == "" { - return 6, false - } - } - return 7, true - }) -} - -func TestFuzz_structarray(t *testing.T) { - obj := &struct { - A [3]struct { - S string - } - B [2]int - }{} - - tryFuzz(t, New(), obj, func() (int, bool) { - for _, v := range obj.A { - if v.S == "" { - return 1, false - } - } - - for _, v := range obj.B { - if v == 0 { - return 2, false - } - } - return 3, true - }) -} - -func TestFuzz_custom(t *testing.T) { - obj := &struct { - A string - B *string - C map[string]string - D *map[string]string - }{} - - testPhrase := "gotcalled" - testMap := map[string]string{"C": "D"} - f := New().Funcs( - func(s *string, c Continue) { - *s = testPhrase - }, - func(m map[string]string, c Continue) { - m["C"] = "D" - }, - ) - - tryFuzz(t, f, obj, func() (int, bool) { - if obj.A != testPhrase { - return 1, false - } - if obj.B == nil { - return 2, false - } - if *obj.B != testPhrase { - return 3, false - } - if e, a := testMap, obj.C; !reflect.DeepEqual(e, a) { - return 4, false - } - if obj.D == nil { - return 5, false - } - if e, a := testMap, *obj.D; !reflect.DeepEqual(e, a) { - return 6, false - } - return 7, true - }) -} - -type SelfFuzzer string - -// Implement fuzz.Interface. -func (sf *SelfFuzzer) Fuzz(c Continue) { - *sf = selfFuzzerTestPhrase -} - -const selfFuzzerTestPhrase = "was fuzzed" - -func TestFuzz_interface(t *testing.T) { - f := New() - - var obj1 SelfFuzzer - tryFuzz(t, f, &obj1, func() (int, bool) { - if obj1 != selfFuzzerTestPhrase { - return 1, false - } - return 1, true - }) - - var obj2 map[int]SelfFuzzer - tryFuzz(t, f, &obj2, func() (int, bool) { - for _, v := range obj2 { - if v != selfFuzzerTestPhrase { - return 1, false - } - } - return 1, true - }) -} - -func TestFuzz_interfaceAndFunc(t *testing.T) { - const privateTestPhrase = "private phrase" - f := New().Funcs( - // This should take precedence over SelfFuzzer.Fuzz(). 
- func(s *SelfFuzzer, c Continue) { - *s = privateTestPhrase - }, - ) - - var obj1 SelfFuzzer - tryFuzz(t, f, &obj1, func() (int, bool) { - if obj1 != privateTestPhrase { - return 1, false - } - return 1, true - }) - - var obj2 map[int]SelfFuzzer - tryFuzz(t, f, &obj2, func() (int, bool) { - for _, v := range obj2 { - if v != privateTestPhrase { - return 1, false - } - } - return 1, true - }) -} - -func TestFuzz_noCustom(t *testing.T) { - type Inner struct { - Str string - } - type Outer struct { - Str string - In Inner - } - - testPhrase := "gotcalled" - f := New().Funcs( - func(outer *Outer, c Continue) { - outer.Str = testPhrase - c.Fuzz(&outer.In) - }, - func(inner *Inner, c Continue) { - inner.Str = testPhrase - }, - ) - c := Continue{fc: &fuzzerContext{fuzzer: f}, Rand: f.r} - - // Fuzzer.Fuzz() - obj1 := Outer{} - f.Fuzz(&obj1) - if obj1.Str != testPhrase { - t.Errorf("expected Outer custom function to have been called") - } - if obj1.In.Str != testPhrase { - t.Errorf("expected Inner custom function to have been called") - } - - // Continue.Fuzz() - obj2 := Outer{} - c.Fuzz(&obj2) - if obj2.Str != testPhrase { - t.Errorf("expected Outer custom function to have been called") - } - if obj2.In.Str != testPhrase { - t.Errorf("expected Inner custom function to have been called") - } - - // Fuzzer.FuzzNoCustom() - obj3 := Outer{} - f.FuzzNoCustom(&obj3) - if obj3.Str == testPhrase { - t.Errorf("expected Outer custom function to not have been called") - } - if obj3.In.Str != testPhrase { - t.Errorf("expected Inner custom function to have been called") - } - - // Continue.FuzzNoCustom() - obj4 := Outer{} - c.FuzzNoCustom(&obj4) - if obj4.Str == testPhrase { - t.Errorf("expected Outer custom function to not have been called") - } - if obj4.In.Str != testPhrase { - t.Errorf("expected Inner custom function to have been called") - } -} - -func TestFuzz_NumElements(t *testing.T) { - f := New().NilChance(0).NumElements(0, 1) - obj := &struct { - A []int - }{} - - tryFuzz(t, f, obj, func() (int, bool) { - if obj.A == nil { - return 1, false - } - return 2, len(obj.A) == 0 - }) - tryFuzz(t, f, obj, func() (int, bool) { - if obj.A == nil { - return 3, false - } - return 4, len(obj.A) == 1 - }) -} - -func TestFuzz_Maxdepth(t *testing.T) { - type S struct { - S *S - } - - f := New().NilChance(0) - - f.MaxDepth(1) - for i := 0; i < 100; i++ { - obj := S{} - f.Fuzz(&obj) - - if obj.S != nil { - t.Errorf("Expected nil") - } - } - - f.MaxDepth(3) // field, ptr - for i := 0; i < 100; i++ { - obj := S{} - f.Fuzz(&obj) - - if obj.S == nil { - t.Errorf("Expected obj.S not nil") - } else if obj.S.S != nil { - t.Errorf("Expected obj.S.S nil") - } - } - - f.MaxDepth(5) // field, ptr, field, ptr - for i := 0; i < 100; i++ { - obj := S{} - f.Fuzz(&obj) - - if obj.S == nil { - t.Errorf("Expected obj.S not nil") - } else if obj.S.S == nil { - t.Errorf("Expected obj.S.S not nil") - } else if obj.S.S.S != nil { - t.Errorf("Expected obj.S.S.S nil") - } - } -} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 0000000000..8ec4fe9e97 --- /dev/null +++ b/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000000..d8156a60ba --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 
1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000000..04fdf09f13 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000000..b4bb97f6bc --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000000..5dc68268d9 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000000..9d92c11f16 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
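The README's point that a UUID here is a 16 byte array (a value type) matters in practice: UUIDs compare with `==` and can key maps directly. Below is a minimal usage sketch, not part of the upstream diff, assuming the package is imported under the vendored path shown in this tree and using only identifiers this diff adds (New, Parse, Version, Variant, Nil):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// New returns a random (Version 4) UUID, panicking only if
	// crypto/rand fails.
	id := uuid.New()
	fmt.Println(id, id.Version(), id.Variant()) // canonical form, VERSION_4, RFC4122

	// Parse accepts both the canonical form and the urn:uuid: form.
	ns, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		panic(err)
	}

	// Because UUID is a [16]byte value type, == comparison and use
	// as a map key work directly, with no byte-slice handling.
	seen := map[uuid.UUID]bool{id: true}
	fmt.Println(seen[id], id == ns, ns == uuid.Nil)
}
```

Since the array form cannot represent an absent value, callers use the package's Nil UUID (all zeros, defined in hash.go below) where the earlier slice-based packages would have used an invalid UUID.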
+ +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000000..fa820b9d30 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000000..5b8a4b9af8 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
+package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000000..b174616315 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000000..7f9e0c6c0e --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000000..3e4e90dc44 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000000..24b78edc90 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000000..0cbbcddbd6 --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000000..f326b54db3 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000000..e6ef06cdc8 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. 
+type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t to the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000000..5ea6c73780 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000000..7f3643fe9a --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,198 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. 
+// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000000..199a1ac654 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set, NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000000..84af91c9f5 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10⁻¹¹), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate.
+func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/googleapis/gnostic/.gitignore b/vendor/github.com/googleapis/gnostic/.gitignore deleted file mode 100644 index 63149fddab..0000000000 --- a/vendor/github.com/googleapis/gnostic/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Eclipse -.checkstyle -.project -.settings -# Swift -.build -Packages -# vi -*.swp -# vscode -.vscode -.DS_Store -*~ -Package.resolved diff --git a/vendor/github.com/googleapis/gnostic/.travis-install.sh b/vendor/github.com/googleapis/gnostic/.travis-install.sh deleted file mode 100755 index 83319ae4c2..0000000000 --- a/vendor/github.com/googleapis/gnostic/.travis-install.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -# -# Install dependencies that aren't available as Ubuntu packages. -# -# Everything goes into $HOME/local. -# -# Scripts should add -# - $HOME/local/bin to PATH -# - $HOME/local/lib to LD_LIBRARY_PATH -# - -cd -mkdir -p local - -# Install swift -SWIFT_URL=https://swift.org/builds/swift-4.0-branch/ubuntu1404/swift-4.0-DEVELOPMENT-SNAPSHOT-2017-09-01-a/swift-4.0-DEVELOPMENT-SNAPSHOT-2017-09-01-a-ubuntu14.04.tar.gz -echo $SWIFT_URL -curl -fSsL $SWIFT_URL -o swift.tar.gz -tar -xzf swift.tar.gz --strip-components=2 --directory=local - -# Install protoc -PROTOC_URL=https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip -echo $PROTOC_URL -curl -fSsL $PROTOC_URL -o protoc.zip -unzip protoc.zip -d local - -# Verify installation -find local diff --git a/vendor/github.com/googleapis/gnostic/.travis.yml b/vendor/github.com/googleapis/gnostic/.travis.yml deleted file mode 100644 index d31126d162..0000000000 --- a/vendor/github.com/googleapis/gnostic/.travis.yml +++ /dev/null @@ -1,46 +0,0 @@ -# Travis CI build file for OpenAPI Compiler, including Go and Swift plugins - -# Use Ubuntu 14.04 -dist: trusty - -sudo: false - -language: go - -addons: - apt: - packages: - - clang-3.8 - - lldb-3.8 - - libicu-dev - - libtool - - libcurl4-openssl-dev - - libbsd-dev - - build-essential - - libssl-dev - - uuid-dev - - curl - - unzip - -install: - - ./.travis-install.sh - - export PATH=.:$HOME/local/bin:$PATH - - make - -script: - - go test . -v - - pushd plugins/gnostic-go-generator/examples/v2.0/bookstore - - make test - - popd - - pushd plugins/gnostic-go-generator/examples/v3.0/bookstore - - make test - - popd - - export PATH=.:$HOME/local/bin:$PATH - - export LD_LIBRARY_PATH=$HOME/local/lib - - pushd plugins/gnostic-swift-generator - - make - - cd examples/bookstore - - make - - .build/debug/Server & - - make test - diff --git a/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh b/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh deleted file mode 100755 index 017dab670e..0000000000 --- a/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -go get github.com/golang/protobuf/protoc-gen-go - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ -OpenAPIv2/OpenAPIv2.proto - -protoc \ ---go_out=:. \ -plugins/plugin.proto - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ -OpenAPIv3/OpenAPIv3.proto - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ -discovery/discovery.proto diff --git a/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md b/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md deleted file mode 100644 index 6736efd943..0000000000 --- a/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# How to become a contributor and submit your own code - -## Contributor License Agreements - -We'd love to accept your sample apps and patches! Before we can take them, we -have to jump a couple of legal hurdles. - -Please fill out either the individual or corporate Contributor License Agreement -(CLA). - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual CLA] - (https://developers.google.com/open-source/cla/individual). - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA] - (https://developers.google.com/open-source/cla/corporate). - -Follow either of the two links above to access the appropriate CLA and -instructions for how to sign and return it. Once we receive it, we'll be able to -accept your pull requests. - -## Contributing A Patch - -1. Submit an issue describing your proposed change to the repo in question. -1. The repo owner will respond to your issue promptly. -1. If your proposed change is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). -1. Fork the desired repo, develop and test your code changes. -1. Ensure that your code adheres to the existing style in the sample to which - you are contributing. Refer to the - [Google Cloud Platform Samples Style Guide] - (https://github.com/GoogleCloudPlatform/Template/wiki/style.html) for the - recommended coding standards for this organization. -1. Ensure that your code has an appropriate set of unit tests which all pass. -1. Submit a pull request. 
diff --git a/vendor/github.com/googleapis/gnostic/Makefile b/vendor/github.com/googleapis/gnostic/Makefile deleted file mode 100644 index 8a772811ed..0000000000 --- a/vendor/github.com/googleapis/gnostic/Makefile +++ /dev/null @@ -1,16 +0,0 @@ - -build: - go get - go install - cd generate-gnostic; go get; go install - cd apps/disco; go get; go install - cd apps/report; go get; go install - cd apps/petstore-builder; go get; go install - cd plugins/gnostic-summary; go get; go install - cd plugins/gnostic-analyze; go get; go install - cd plugins/gnostic-go-generator; go get; go install - rm -f $(GOPATH)/bin/gnostic-go-client $(GOPATH)/bin/gnostic-go-server - ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-client - ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-server - cd extensions/sample; make - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go index 0e32451a32..5351f36f36 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -7106,20 +7106,20 @@ func (m *Any) ToRawInfo() interface{} { func (m *ApiKeySecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7130,14 +7130,14 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} { func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7148,24 +7148,24 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { func (m *BodyParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: 
m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7176,17 +7176,17 @@ func (m *BodyParameter) ToRawInfo() interface{} { func (m *Contact) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.Email != "" { - info = append(info, yaml.MapItem{"email", m.Email}) + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7198,7 +7198,7 @@ func (m *Default) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} @@ -7210,7 +7210,7 @@ func (m *Definitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -7221,41 +7221,41 @@ func (m *Definitions) ToRawInfo() interface{} { func (m *Document) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Swagger != "" { - info = append(info, yaml.MapItem{"swagger", m.Swagger}) + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) } if m.Info != nil { - info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) } // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Host != "" { - info = append(info, yaml.MapItem{"host", m.Host}) + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) } if m.BasePath != "" { - info = append(info, yaml.MapItem{"basePath", m.BasePath}) + info = 
append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) } if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } if m.Paths != nil { - info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) } // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Definitions != nil { - info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) } // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Parameters != nil { - info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) } // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Security) != 0 { @@ -7263,11 +7263,11 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) } // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Tags) != 0 { @@ -7275,16 +7275,16 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Tags { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"tags", items}) + info = append(info, yaml.MapItem{Key: "tags", Value: items}) } // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] 
MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7296,7 +7296,7 @@ func (m *Examples) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} @@ -7307,14 +7307,14 @@ func (m *Examples) ToRawInfo() interface{} { func (m *ExternalDocs) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7325,38 +7325,38 @@ func (m *ExternalDocs) ToRawInfo() interface{} { func (m *FileSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny 
StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7367,81 +7367,81 @@ func (m *FileSchema) ToRawInfo() interface{} { func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: 
m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7452,69 +7452,69 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { func (m *Header) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: 
"maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7525,78 +7525,78 @@ func (m *Header) ToRawInfo() interface{} { func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if 
m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7608,7 +7608,7 @@ func (m *Headers) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} @@ -7619,28 +7619,28 @@ func (m *Headers) ToRawInfo() interface{} { func (m *Info) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Version != "" { - info = append(info, yaml.MapItem{"version", m.Version}) + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.TermsOfService != "" { - info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) } if m.Contact != nil { - info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: 
"contact", Value: m.Contact.ToRawInfo()}) } // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.License != nil { - info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) } // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7655,7 +7655,7 @@ func (m *ItemsItem) ToRawInfo() interface{} { for _, item := range m.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"schema", items}) + info = append(info, yaml.MapItem{Key: "schema", Value: items}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} return info @@ -7665,10 +7665,10 @@ func (m *ItemsItem) ToRawInfo() interface{} { func (m *JsonReference) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } return info } @@ -7677,14 +7677,14 @@ func (m *JsonReference) ToRawInfo() interface{} { func (m *License) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7695,7 +7695,7 @@ func (m *License) ToRawInfo() interface{} { func (m *NamedAny) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7705,7 +7705,7 @@ func (m *NamedAny) ToRawInfo() interface{} { func (m *NamedHeader) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7715,7 +7715,7 @@ func (m *NamedHeader) ToRawInfo() interface{} { func (m *NamedParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Parameter 
StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7725,7 +7725,7 @@ func (m *NamedParameter) ToRawInfo() interface{} { func (m *NamedPathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7735,7 +7735,7 @@ func (m *NamedPathItem) ToRawInfo() interface{} { func (m *NamedResponse) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7745,7 +7745,7 @@ func (m *NamedResponse) ToRawInfo() interface{} { func (m *NamedResponseValue) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7755,7 +7755,7 @@ func (m *NamedResponseValue) ToRawInfo() interface{} { func (m *NamedSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7765,7 +7765,7 @@ func (m *NamedSchema) ToRawInfo() interface{} { func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7775,10 +7775,10 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { func (m *NamedString) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Value != "" { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -7787,7 +7787,7 @@ func (m *NamedString) ToRawInfo() interface{} { func (m *NamedStringArray) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7824,27 +7824,27 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if 
m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7855,24 +7855,24 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7883,24 +7883,24 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.Description != "" { - info = append(info, 
yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7911,24 +7911,24 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7946,56 +7946,56 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { func (m *Operation) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{"tags", m.Tags}) + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) } if m.Summary != "" { - info = append(info, yaml.MapItem{"summary", m.Summary}) + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.OperationId != "" { - info = append(info, yaml.MapItem{"operationId", m.OperationId}) + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Parameters) != 0 { items := make([]interface{}, 0) for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: 
"parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if m.Deprecated != false { - info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) } if len(m.Security) != 0 { items := make([]interface{}, 0) for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8024,7 +8024,7 @@ func (m *ParameterDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} @@ -8052,34 +8052,34 @@ func (m *ParametersItem) ToRawInfo() interface{} { func (m *PathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Get != nil { - info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) } // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Put != nil { - info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) } // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Post != nil { - info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) } // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Delete != nil { - info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) } // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Options != nil { - info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: 
"options", Value: m.Options.ToRawInfo()}) } // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Head != nil { - info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) } // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Patch != nil { - info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) } // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Parameters) != 0 { @@ -8087,12 +8087,12 @@ func (m *PathItem) ToRawInfo() interface{} { for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8103,78 +8103,78 @@ func (m *PathItem) ToRawInfo() interface{} { func (m *PathParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, 
yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8186,13 +8186,13 @@ func (m *Paths) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} if m.Path != nil { for _, item := range m.Path { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} @@ -8203,66 +8203,66 @@ func (m *Paths) ToRawInfo() interface{} { func (m *PrimitivesItems) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = 
append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8274,7 +8274,7 @@ func (m *Properties) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: 
item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -8285,81 +8285,81 @@ func (m *Properties) ToRawInfo() interface{} { func (m *QueryParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, 
yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8370,23 +8370,23 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} { func (m *Response) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Headers != nil { - info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) } // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Examples != nil { - info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) } // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8398,7 +8398,7 @@ func (m *ResponseDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} @@ -8427,13 +8427,13 @@ func (m *Responses) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.ResponseCode != nil { for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:ResponseCode Type:NamedResponseValue 
StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8444,80 +8444,80 @@ func (m *Responses) ToRawInfo() interface{} { func (m *Schema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) } if m.MinProperties != 0 { - info = append(info, 
yaml.MapItem{"minProperties", m.MinProperties}) + info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) } if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) } // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Type != nil { if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) } else { - info = append(info, yaml.MapItem{"type", m.Type.Value}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) } } // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8526,7 +8526,7 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.Items.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"items", items[0]}) + info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) } // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.AllOf) != 0 { @@ -8534,34 +8534,34 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.AllOf { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"allOf", items}) + info = append(info, yaml.MapItem{Key: "allOf", Value: items}) } // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.Properties != nil { - info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) } // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Discriminator != "" { - info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.Xml != nil { - info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) } // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) 
+ info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8590,7 +8590,7 @@ func (m *SecurityDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} @@ -8639,7 +8639,7 @@ func (m *SecurityRequirement) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} @@ -8655,18 +8655,18 @@ func (m *StringArray) ToRawInfo() interface{} { func (m *Tag) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8677,7 +8677,7 @@ func (m *Tag) ToRawInfo() interface{} { func (m *TypeItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Value) != 0 { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -8687,7 +8687,7 @@ func (m *VendorExtension) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} @@ -8698,23 +8698,23 @@ func (m *VendorExtension) ToRawInfo() interface{} { func (m *Xml) ToRawInfo() interface{} { info := 
yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Namespace != "" { - info = append(info, yaml.MapItem{"namespace", m.Namespace}) + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) } if m.Prefix != "" { - info = append(info, yaml.MapItem{"prefix", m.Prefix}) + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) } if m.Attribute != false { - info = append(info, yaml.MapItem{"attribute", m.Attribute}) + info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) } if m.Wrapped != false { - info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index 37da7df256..a030fa6765 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: OpenAPIv2/OpenAPIv2.proto -// DO NOT EDIT! /* Package openapi_v2 is a generated protocol buffer package. @@ -4257,7 +4256,7 @@ func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, diff --git a/vendor/github.com/googleapis/gnostic/README.md b/vendor/github.com/googleapis/gnostic/README.md deleted file mode 100644 index d350f3f013..0000000000 --- a/vendor/github.com/googleapis/gnostic/README.md +++ /dev/null @@ -1,103 +0,0 @@ -[![Build Status](https://travis-ci.org/googleapis/gnostic.svg?branch=master)](https://travis-ci.org/googleapis/gnostic) - -# ⨁ gnostic - -This repository contains a Go command line tool which converts -JSON and YAML [OpenAPI](https://github.com/OAI/OpenAPI-Specification) -descriptions to and from equivalent Protocol Buffer representations. - -[Protocol Buffers](https://developers.google.com/protocol-buffers/) -provide a language-neutral, platform-neutral, extensible mechanism -for serializing structured data. -**gnostic**'s Protocol Buffer models for the OpenAPI Specification -can be used to generate code that includes data structures with -explicit fields for the elements of an OpenAPI description. -This makes it possible for developers to work with OpenAPI -descriptions in type-safe ways, which is particularly useful -in strongly-typed languages like Go and Swift. 
- -**gnostic** reads OpenAPI descriptions into -these generated data structures, reports errors, -resolves internal dependencies, and writes the results -in a binary form that can be used in any language that is -supported by the Protocol Buffer tools. -A plugin interface simplifies integration with API -tools written in a variety of different languages, -and when necessary, Protocol Buffer OpenAPI descriptions -can be reexported as JSON or YAML. - -**gnostic** compilation code and OpenAPI Protocol Buffer -models are automatically generated from an -[OpenAPI JSON Schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json). -Source code for the generator is in the [generate-gnostic](generate-gnostic) directory. - -## Disclaimer - -This is prerelease software and work in progress. Feedback and -contributions are welcome, but we currently make no guarantees of -function or stability. - -## Requirements - -**gnostic** can be run in any environment that supports [Go](http://golang.org) -and the [Google Protocol Buffer Compiler](https://github.com/google/protobuf). - -## Installation - -1. Get this package by downloading it with `go get`. - - go get github.com/googleapis/gnostic - -2. [Optional] Build and run the compiler generator. -This uses the OpenAPI JSON schema to generate a Protocol Buffer language file -that describes the OpenAPI specification and a Go-language file of code that -will read a JSON or YAML OpenAPI representation into the generated protocol -buffers. Pre-generated versions of these files are in the OpenAPIv2 directory. - - cd $GOPATH/src/github.com/googleapis/gnostic/generate-gnostic - go install - cd .. - generate-gnostic --v2 - -3. [Optional] Generate Protocol Buffer support code. -A pre-generated version of this file is checked into the OpenAPIv2 directory. -This step requires a local installation of protoc, the Protocol Buffer Compiler. -You can get protoc [here](https://github.com/google/protobuf). - - ./COMPILE-PROTOS.sh - -4. [Optional] Rebuild **gnostic**. This is only necessary if you've performed steps -2 or 3 above. - - go install github.com/googleapis/gnostic - -5. Run **gnostic**. This will create a file in the current directory named "petstore.pb" that contains a binary -Protocol Buffer description of a sample API. - - gnostic --pb-out=. examples/petstore.json - -6. You can also compile files that you specify with a URL. Here's another way to compile the previous -example. This time we're creating "petstore.text", which contains a textual representation of the -Protocol Buffer description. This is mainly for use in testing and debugging. - - gnostic --text-out=petstore.text https://raw.githubusercontent.com/googleapis/gnostic/master/examples/petstore.json - -7. For a sample application, see apps/report. - - go install github.com/googleapis/gnostic/apps/report - report petstore.pb - -8. **gnostic** supports plugins. This builds and runs a sample plugin -that reports some basic information about an API. The "-" causes the plugin to -write its output to stdout. - - go install github.com/googleapis/gnostic/plugins/gnostic-go-sample - gnostic examples/petstore.json --go-sample-out=- - -## Copyright - -Copyright 2017, Google Inc. - -## License - -Released under the Apache 2.0 license. 
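The deleted README above walks through gnostic's CLI workflow; the vendored `OpenAPIv2` package updated in this same diff is what consumers use to read the binary output back. As a minimal sketch — the `petstore.pb` path is a stand-in for any file produced with `--pb-out`, not a file shipped with this change — unmarshalling a compiled description looks roughly like:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// Read the binary protobuf that `gnostic --pb-out=.` wrote.
	data, err := ioutil.ReadFile("petstore.pb")
	if err != nil {
		log.Fatal(err)
	}
	// Decode it into the generated, strongly-typed Document model.
	document := &openapi_v2.Document{}
	if err := proto.Unmarshal(data, document); err != nil {
		log.Fatal(err)
	}
	fmt.Println(document.GetSwagger(), document.GetInfo().GetTitle())
}
```

The deleted `readOpenAPIBinary` helper later in this diff follows the same `proto.Unmarshal` pattern before dispatching on the detected OpenAPI version.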
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index 2d4b3303db..c954a2d9b2 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -110,7 +110,9 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { if err != nil { return nil, err } - infoCache[filename] = info + if len(filename) > 0 { + infoCache[filename] = info + } return info, nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh old mode 100755 new mode 100644 diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index 7c6b914967..749ff78416 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: extension.proto -// DO NOT EDIT! /* Package openapiextension_v1 is a generated protocol buffer package. @@ -78,7 +77,7 @@ func (m *Version) GetSuffix() string { // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. + // The specifications will appear in the order they are specified to gnostic. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` // The version number of openapi compiler. 
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` @@ -192,28 +191,28 @@ func init() { func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, - 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, - 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, - 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, - 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, - 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, - 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, - 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, - 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, - 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, - 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, - 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, - 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, - 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, - 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, - 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, - 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, - 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, - 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, - 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, - 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, - 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, - 0x02, 0x00, 0x00, + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, + 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, + 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, + 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, + 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, + 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, + 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, + 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, + 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, + 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 
0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, + 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, + 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, + 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, + 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, + 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, + 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, + 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, + 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, + 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, + 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, + 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, + 0x80, 0x51, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto index 806760a132..04856f913b 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -29,7 +29,7 @@ option java_multiple_files = true; option java_outer_classname = "OpenAPIExtensionV1"; // The Java package name must be proto package name with proper prefix. -option java_package = "org.openapic.v1"; +option java_package = "org.gnostic.v1"; // A reasonable prefix for the Objective-C symbols generated from the package. // It should at a minimum be 3 characters long, all uppercase, and convention @@ -53,7 +53,7 @@ message Version { message ExtensionHandlerRequest { // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. + // The specifications will appear in the order they are specified to gnostic. Wrapper wrapper = 1; // The version number of openapi compiler. diff --git a/vendor/github.com/googleapis/gnostic/gnostic.go b/vendor/github.com/googleapis/gnostic/gnostic.go deleted file mode 100644 index 80e050503c..0000000000 --- a/vendor/github.com/googleapis/gnostic/gnostic.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate ./COMPILE-PROTOS.sh - -// Gnostic is a tool for building better REST APIs through knowledge. 
-// -// Gnostic reads declarative descriptions of REST APIs that conform -// to the OpenAPI Specification, reports errors, resolves internal -// dependencies, and puts the results in a binary form that can -// be used in any language that is supported by the Protocol Buffer -// tools. -// -// Gnostic models are validated and typed. This allows API tool -// developers to focus on their product and not worry about input -// validation and type checking. -// -// Gnostic calls plugins that implement a variety of API implementation -// and support features including generation of client and server -// support code. -package main - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/googleapis/gnostic/OpenAPIv2" - "github.com/googleapis/gnostic/OpenAPIv3" - "github.com/googleapis/gnostic/compiler" - "github.com/googleapis/gnostic/discovery" - "github.com/googleapis/gnostic/jsonwriter" - plugins "github.com/googleapis/gnostic/plugins" - surface "github.com/googleapis/gnostic/surface" - "gopkg.in/yaml.v2" -) - -const ( // Source Format - SourceFormatUnknown = 0 - SourceFormatOpenAPI2 = 2 - SourceFormatOpenAPI3 = 3 - SourceFormatDiscovery = 4 -) - -// Determine the version of an OpenAPI description read from JSON or YAML. -func getOpenAPIVersionFromInfo(info interface{}) int { - m, ok := compiler.UnpackMap(info) - if !ok { - return SourceFormatUnknown - } - swagger, ok := compiler.MapValueForKey(m, "swagger").(string) - if ok && strings.HasPrefix(swagger, "2.0") { - return SourceFormatOpenAPI2 - } - openapi, ok := compiler.MapValueForKey(m, "openapi").(string) - if ok && strings.HasPrefix(openapi, "3.0") { - return SourceFormatOpenAPI3 - } - kind, ok := compiler.MapValueForKey(m, "kind").(string) - if ok && kind == "discovery#restDescription" { - return SourceFormatDiscovery - } - return SourceFormatUnknown -} - -const ( - pluginPrefix = "gnostic-" - extensionPrefix = "gnostic-x-" -) - -type pluginCall struct { - Name string - Invocation string -} - -// Invokes a plugin. -func (p *pluginCall) perform(document proto.Message, sourceFormat int, sourceName string) error { - if p.Name != "" { - request := &plugins.Request{} - - // Infer the name of the executable by adding the prefix. - executableName := pluginPrefix + p.Name - - // Validate invocation string with regular expression. - invocation := p.Invocation - - // - // Plugin invocations must consist of - // zero or more comma-separated key=value pairs followed by a path. - // If pairs are present, a colon separates them from the path. - // Keys and values must be alphanumeric strings and may contain - // dashes, underscores, periods, or forward slashes. - // A path can contain any characters other than the separators ',', ':', and '='. 
- // - invocationRegex := regexp.MustCompile(`^([\w-_\/\.]+=[\w-_\/\.]+(,[\w-_\/\.]+=[\w-_\/\.]+)*:)?[^,:=]+$`) - if !invocationRegex.Match([]byte(p.Invocation)) { - return fmt.Errorf("Invalid invocation of %s: %s", executableName, invocation) - } - - invocationParts := strings.Split(p.Invocation, ":") - var outputLocation string - switch len(invocationParts) { - case 1: - outputLocation = invocationParts[0] - case 2: - parameters := strings.Split(invocationParts[0], ",") - for _, keyvalue := range parameters { - pair := strings.Split(keyvalue, "=") - if len(pair) == 2 { - request.Parameters = append(request.Parameters, &plugins.Parameter{Name: pair[0], Value: pair[1]}) - } - } - outputLocation = invocationParts[1] - default: - // badly-formed request - outputLocation = invocationParts[len(invocationParts)-1] - } - - version := &plugins.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.OutputPath = outputLocation - - request.SourceName = sourceName - switch sourceFormat { - case SourceFormatOpenAPI2: - request.Openapi2 = document.(*openapi_v2.Document) - request.Surface, _ = surface.NewModelFromOpenAPI2(request.Openapi2) - case SourceFormatOpenAPI3: - request.Openapi3 = document.(*openapi_v3.Document) - request.Surface, _ = surface.NewModelFromOpenAPI3(request.Openapi3) - default: - } - - requestBytes, _ := proto.Marshal(request) - - cmd := exec.Command(executableName, "-plugin") - cmd.Stdin = bytes.NewReader(requestBytes) - cmd.Stderr = os.Stderr - output, err := cmd.Output() - if err != nil { - return err - } - response := &plugins.Response{} - err = proto.Unmarshal(output, response) - if err != nil { - return err - } - - plugins.HandleResponse(response, outputLocation) - } - return nil -} - -func isFile(path string) bool { - fileInfo, err := os.Stat(path) - if err != nil { - return false - } - return !fileInfo.IsDir() -} - -func isDirectory(path string) bool { - fileInfo, err := os.Stat(path) - if err != nil { - return false - } - return fileInfo.IsDir() -} - -// Write bytes to a named file. -// Certain names have special meaning: -// ! writes nothing -// - writes to stdout -// = writes to stderr -// If a directory name is given, the file is written there with -// a name derived from the source and extension arguments. -func writeFile(name string, bytes []byte, source string, extension string) { - var writer io.Writer - if name == "!" { - return - } else if name == "-" { - writer = os.Stdout - } else if name == "=" { - writer = os.Stderr - } else if isDirectory(name) { - base := filepath.Base(source) - // Remove the original source extension. - base = base[0 : len(base)-len(filepath.Ext(base))] - // Build the path that puts the result in the passed-in directory. - filename := name + "/" + base + "." + extension - file, _ := os.Create(filename) - defer file.Close() - writer = file - } else { - file, _ := os.Create(name) - defer file.Close() - writer = file - } - writer.Write(bytes) - if name == "-" || name == "=" { - writer.Write([]byte("\n")) - } -} - -// The Gnostic structure holds global state information for gnostic. -type Gnostic struct { - usage string - sourceName string - binaryOutputPath string - textOutputPath string - yamlOutputPath string - jsonOutputPath string - errorOutputPath string - resolveReferences bool - pluginCalls []*pluginCall - extensionHandlers []compiler.ExtensionHandler - sourceFormat int -} - -// Initialize a structure to store global application state. 
-func newGnostic() *Gnostic { - g := &Gnostic{} - // Option fields initialize to their default values. - g.usage = ` -Usage: gnostic OPENAPI_SOURCE [OPTIONS] - OPENAPI_SOURCE is the filename or URL of an OpenAPI description to read. -Options: - --pb-out=PATH Write a binary proto to the specified location. - --text-out=PATH Write a text proto to the specified location. - --json-out=PATH Write a json API description to the specified location. - --yaml-out=PATH Write a yaml API description to the specified location. - --errors-out=PATH Write compilation errors to the specified location. - --PLUGIN-out=PATH Run the plugin named gnostic_PLUGIN and write results - to the specified location. - --x-EXTENSION Use the extension named gnostic-x-EXTENSION - to process OpenAPI specification extensions. - --resolve-refs Explicitly resolve $ref references. - This could have problems with recursive definitions. -` - // Initialize internal structures. - g.pluginCalls = make([]*pluginCall, 0) - g.extensionHandlers = make([]compiler.ExtensionHandler, 0) - return g -} - -// Parse command-line options. -func (g *Gnostic) readOptions() { - // plugin processing matches patterns of the form "--PLUGIN-out=PATH" and "--PLUGIN_out=PATH" - pluginRegex := regexp.MustCompile("--(.+)[-_]out=(.+)") - - // extension processing matches patterns of the form "--x-EXTENSION" - extensionRegex := regexp.MustCompile("--x-(.+)") - - for i, arg := range os.Args { - if i == 0 { - continue // skip the tool name - } - var m [][]byte - if m = pluginRegex.FindSubmatch([]byte(arg)); m != nil { - pluginName := string(m[1]) - invocation := string(m[2]) - switch pluginName { - case "pb": - g.binaryOutputPath = invocation - case "text": - g.textOutputPath = invocation - case "json": - g.jsonOutputPath = invocation - case "yaml": - g.yamlOutputPath = invocation - case "errors": - g.errorOutputPath = invocation - default: - p := &pluginCall{Name: pluginName, Invocation: invocation} - g.pluginCalls = append(g.pluginCalls, p) - } - } else if m = extensionRegex.FindSubmatch([]byte(arg)); m != nil { - extensionName := string(m[1]) - extensionHandler := compiler.ExtensionHandler{Name: extensionPrefix + extensionName} - g.extensionHandlers = append(g.extensionHandlers, extensionHandler) - } else if arg == "--resolve-refs" { - g.resolveReferences = true - } else if arg[0] == '-' { - fmt.Fprintf(os.Stderr, "Unknown option: %s.\n%s\n", arg, g.usage) - os.Exit(-1) - } else { - g.sourceName = arg - } - } -} - -// Validate command-line options. -func (g *Gnostic) validateOptions() { - if g.binaryOutputPath == "" && - g.textOutputPath == "" && - g.yamlOutputPath == "" && - g.jsonOutputPath == "" && - g.errorOutputPath == "" && - len(g.pluginCalls) == 0 { - fmt.Fprintf(os.Stderr, "Missing output directives.\n%s\n", g.usage) - os.Exit(-1) - } - if g.sourceName == "" { - fmt.Fprintf(os.Stderr, "No input specified.\n%s\n", g.usage) - os.Exit(-1) - } - // If we get here and the error output is unspecified, write errors to stderr. - if g.errorOutputPath == "" { - g.errorOutputPath = "=" - } -} - -// Generate an error message to be written to stderr or a file. -func (g *Gnostic) errorBytes(err error) []byte { - return []byte("Errors reading " + g.sourceName + "\n" + err.Error()) -} - -// Read an OpenAPI description from YAML or JSON. -func (g *Gnostic) readOpenAPIText(bytes []byte) (message proto.Message, err error) { - info, err := compiler.ReadInfoFromBytes(g.sourceName, bytes) - if err != nil { - return nil, err - } - // Determine the OpenAPI version. 
- g.sourceFormat = getOpenAPIVersionFromInfo(info) - if g.sourceFormat == SourceFormatUnknown { - return nil, errors.New("unable to identify OpenAPI version") - } - // Compile to the proto model. - if g.sourceFormat == SourceFormatOpenAPI2 { - document, err := openapi_v2.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) - if err != nil { - return nil, err - } - message = document - } else if g.sourceFormat == SourceFormatOpenAPI3 { - document, err := openapi_v3.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) - if err != nil { - return nil, err - } - message = document - } else { - document, err := discovery_v1.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) - if err != nil { - return nil, err - } - message = document - } - return message, err -} - -// Read an OpenAPI binary file. -func (g *Gnostic) readOpenAPIBinary(data []byte) (message proto.Message, err error) { - // try to read an OpenAPI v3 document - documentV3 := &openapi_v3.Document{} - err = proto.Unmarshal(data, documentV3) - if err == nil && strings.HasPrefix(documentV3.Openapi, "3.0") { - g.sourceFormat = SourceFormatOpenAPI3 - return documentV3, nil - } - // if that failed, try to read an OpenAPI v2 document - documentV2 := &openapi_v2.Document{} - err = proto.Unmarshal(data, documentV2) - if err == nil && strings.HasPrefix(documentV2.Swagger, "2.0") { - g.sourceFormat = SourceFormatOpenAPI2 - return documentV2, nil - } - // if that failed, try to read a Discovery Format document - discoveryDocument := &discovery_v1.Document{} - err = proto.Unmarshal(data, discoveryDocument) - if err == nil { // && strings.HasPrefix(documentV2.Swagger, "2.0") { - g.sourceFormat = SourceFormatDiscovery - return discoveryDocument, nil - } - return nil, err -} - -// Write a binary pb representation. -func (g *Gnostic) writeBinaryOutput(message proto.Message) { - protoBytes, err := proto.Marshal(message) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - defer os.Exit(-1) - } else { - writeFile(g.binaryOutputPath, protoBytes, g.sourceName, "pb") - } -} - -// Write a text pb representation. -func (g *Gnostic) writeTextOutput(message proto.Message) { - bytes := []byte(proto.MarshalTextString(message)) - writeFile(g.textOutputPath, bytes, g.sourceName, "text") -} - -// Write JSON/YAML OpenAPI representations. -func (g *Gnostic) writeJSONYAMLOutput(message proto.Message) { - // Convert the OpenAPI document into an exportable MapSlice. - var rawInfo yaml.MapSlice - var ok bool - var err error - if g.sourceFormat == SourceFormatOpenAPI2 { - document := message.(*openapi_v2.Document) - rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) - if !ok { - rawInfo = nil - } - } else if g.sourceFormat == SourceFormatOpenAPI3 { - document := message.(*openapi_v3.Document) - rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) - if !ok { - rawInfo = nil - } - } else if g.sourceFormat == SourceFormatDiscovery { - document := message.(*discovery_v1.Document) - rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) - if !ok { - rawInfo = nil - } - } - // Optionally write description in yaml format. 
- if g.yamlOutputPath != "" { - var bytes []byte - if rawInfo != nil { - bytes, err = yaml.Marshal(rawInfo) - if err != nil { - fmt.Fprintf(os.Stderr, "Error generating yaml output %s\n", err.Error()) - } - writeFile(g.yamlOutputPath, bytes, g.sourceName, "yaml") - } else { - fmt.Fprintf(os.Stderr, "No yaml output available.\n") - } - } - // Optionally write description in json format. - if g.jsonOutputPath != "" { - var bytes []byte - if rawInfo != nil { - bytes, _ = jsonwriter.Marshal(rawInfo) - if err != nil { - fmt.Fprintf(os.Stderr, "Error generating json output %s\n", err.Error()) - } - writeFile(g.jsonOutputPath, bytes, g.sourceName, "json") - } else { - fmt.Fprintf(os.Stderr, "No json output available.\n") - } - } -} - -// Perform all actions specified in the command-line options. -func (g *Gnostic) performActions(message proto.Message) (err error) { - // Optionally resolve internal references. - if g.resolveReferences { - if g.sourceFormat == SourceFormatOpenAPI2 { - document := message.(*openapi_v2.Document) - _, err = document.ResolveReferences(g.sourceName) - } else if g.sourceFormat == SourceFormatOpenAPI3 { - document := message.(*openapi_v3.Document) - _, err = document.ResolveReferences(g.sourceName) - } - if err != nil { - return err - } - } - // Optionally write proto in binary format. - if g.binaryOutputPath != "" { - g.writeBinaryOutput(message) - } - // Optionally write proto in text format. - if g.textOutputPath != "" { - g.writeTextOutput(message) - } - // Optionaly write document in yaml and/or json formats. - if g.yamlOutputPath != "" || g.jsonOutputPath != "" { - g.writeJSONYAMLOutput(message) - } - // Call all specified plugins. - for _, p := range g.pluginCalls { - err := p.perform(message, g.sourceFormat, g.sourceName) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - defer os.Exit(-1) // run all plugins, even when some have errors - } - } - return nil -} - -func (g *Gnostic) main() { - var err error - g.readOptions() - g.validateOptions() - // Read the OpenAPI source. - bytes, err := compiler.ReadBytesForFile(g.sourceName) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - extension := strings.ToLower(filepath.Ext(g.sourceName)) - var message proto.Message - if extension == ".json" || extension == ".yaml" { - // Try to read the source as JSON/YAML. - message, err = g.readOpenAPIText(bytes) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - } else if extension == ".pb" { - // Try to read the source as a binary protocol buffer. - message, err = g.readOpenAPIBinary(bytes) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - } else { - err = errors.New("unknown file extension. 'json', 'yaml', and 'pb' are accepted") - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - // Perform actions specified by command options. 
- err = g.performActions(message) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } -} - -func main() { - g := newGnostic() - g.main() -} diff --git a/vendor/github.com/googleapis/gnostic/gnostic_test.go b/vendor/github.com/googleapis/gnostic/gnostic_test.go deleted file mode 100644 index 22eaee8275..0000000000 --- a/vendor/github.com/googleapis/gnostic/gnostic_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" -) - -func testCompiler(t *testing.T, inputFile string, referenceFile string, expectErrors bool) { - textFile := strings.Replace(filepath.Base(inputFile), filepath.Ext(inputFile), ".text", 1) - errorsFile := strings.Replace(filepath.Base(inputFile), filepath.Ext(inputFile), ".errors", 1) - // remove any preexisting output files - os.Remove(textFile) - os.Remove(errorsFile) - // run the compiler - var err error - var cmd = exec.Command( - "gnostic", - inputFile, - "--text-out=.", - "--errors-out=.", - "--resolve-refs") - //t.Log(cmd.Args) - err = cmd.Run() - if err != nil && !expectErrors { - t.Logf("Compile failed: %+v", err) - t.FailNow() - } - // verify the output against a reference - var outputFile string - if expectErrors { - outputFile = errorsFile - } else { - outputFile = textFile - } - err = exec.Command("diff", outputFile, referenceFile).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(textFile) - os.Remove(errorsFile) - } -} - -func testNormal(t *testing.T, inputFile string, referenceFile string) { - testCompiler(t, inputFile, referenceFile, false) -} - -func testErrors(t *testing.T, inputFile string, referenceFile string) { - testCompiler(t, inputFile, referenceFile, true) -} - -func TestPetstoreJSON(t *testing.T) { - testNormal(t, - "examples/v2.0/json/petstore.json", - "test/v2.0/petstore.text") -} - -func TestPetstoreYAML(t *testing.T) { - testNormal(t, - "examples/v2.0/yaml/petstore.yaml", - "test/v2.0/petstore.text") -} - -func TestSeparateYAML(t *testing.T) { - testNormal(t, - "examples/v2.0/yaml/petstore-separate/spec/swagger.yaml", - "test/v2.0/yaml/petstore-separate/spec/swagger.text") -} - -func TestSeparateJSON(t *testing.T) { - testNormal(t, - "examples/v2.0/json/petstore-separate/spec/swagger.json", - "test/v2.0/yaml/petstore-separate/spec/swagger.text") // yaml and json results should be identical -} - -func TestRemotePetstoreJSON(t *testing.T) { - testNormal(t, - "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/json/petstore.json", - "test/v2.0/petstore.text") -} - -func TestRemotePetstoreYAML(t *testing.T) { - testNormal(t, - "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/yaml/petstore.yaml", - "test/v2.0/petstore.text") -} - -func TestRemoteSeparateYAML(t *testing.T) { - testNormal(t, - "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/yaml/petstore-separate/spec/swagger.yaml", - "test/v2.0/yaml/petstore-separate/spec/swagger.text") -} - -func TestRemoteSeparateJSON(t *testing.T) { - testNormal(t, - "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/json/petstore-separate/spec/swagger.json", - "test/v2.0/yaml/petstore-separate/spec/swagger.text") -} - -func TestErrorBadProperties(t *testing.T) { - testErrors(t, - "examples/errors/petstore-badproperties.yaml", - 
"test/errors/petstore-badproperties.errors") -} - -func TestErrorUnresolvedRefs(t *testing.T) { - testErrors(t, - "examples/errors/petstore-unresolvedrefs.yaml", - "test/errors/petstore-unresolvedrefs.errors") -} - -func TestErrorMissingVersion(t *testing.T) { - testErrors(t, - "examples/errors/petstore-missingversion.yaml", - "test/errors/petstore-missingversion.errors") -} - -func testPlugin(t *testing.T, plugin string, inputFile string, outputFile string, referenceFile string) { - // remove any preexisting output files - os.Remove(outputFile) - // run the compiler - var err error - output, err := exec.Command( - "gnostic", - "--"+plugin+"-out=-", - inputFile).Output() - if err != nil { - t.Logf("Compile failed: %+v", err) - t.FailNow() - } - _ = ioutil.WriteFile(outputFile, output, 0644) - err = exec.Command("diff", outputFile, referenceFile).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(outputFile) - } -} - -func TestSamplePluginWithPetstore(t *testing.T) { - testPlugin(t, - "summary", - "examples/v2.0/yaml/petstore.yaml", - "sample-petstore.out", - "test/v2.0/yaml/sample-petstore.out") -} - -func TestErrorInvalidPluginInvocations(t *testing.T) { - var err error - output, err := exec.Command( - "gnostic", - "examples/v2.0/yaml/petstore.yaml", - "--errors-out=-", - "--plugin-out=foo=bar,:abc", - "--plugin-out=,foo=bar:abc", - "--plugin-out=foo=:abc", - "--plugin-out==bar:abc", - "--plugin-out=,,:abc", - "--plugin-out=foo=bar=baz:abc", - ).Output() - if err == nil { - t.Logf("Invalid invocations were accepted") - t.FailNow() - } - outputFile := "invalid-plugin-invocation.errors" - _ = ioutil.WriteFile(outputFile, output, 0644) - err = exec.Command("diff", outputFile, "test/errors/invalid-plugin-invocation.errors").Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(outputFile) - } -} - -func TestValidPluginInvocations(t *testing.T) { - var err error - output, err := exec.Command( - "gnostic", - "examples/v2.0/yaml/petstore.yaml", - "--errors-out=-", - // verify an invocation with no parameters - "--summary-out=!", // "!" indicates that no output should be generated - // verify single pair of parameters - "--summary-out=a=b:!", - // verify multiple parameters - "--summary-out=a=b,c=123,xyz=alphabetagammadelta:!", - // verify that special characters / . 
- _ can be included in parameter keys and values - "--summary-out=a/b/c=x/y/z:!", - "--summary-out=a.b.c=x.y.z:!", - "--summary-out=a-b-c=x-y-z:!", - "--summary-out=a_b_c=x_y_z:!", - ).Output() - if len(output) != 0 { - t.Logf("Valid invocations generated invalid errors\n%s", string(output)) - t.FailNow() - } - if err != nil { - t.Logf("Valid invocations were not accepted") - t.FailNow() - } -} - -func TestExtensionHandlerWithLibraryExample(t *testing.T) { - outputFile := "library-example-with-ext.text.out" - inputFile := "test/library-example-with-ext.json" - referenceFile := "test/library-example-with-ext.text.out" - - os.Remove(outputFile) - // run the compiler - var err error - - command := exec.Command( - "gnostic", - "--x-sampleone", - "--x-sampletwo", - "--text-out="+outputFile, - "--resolve-refs", - inputFile) - - _, err = command.Output() - if err != nil { - t.Logf("Compile failed for command %v: %+v", command, err) - t.FailNow() - } - //_ = ioutil.WriteFile(outputFile, output, 0644) - err = exec.Command("diff", outputFile, referenceFile).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(outputFile) - } -} - -func TestJSONOutput(t *testing.T) { - inputFile := "test/library-example-with-ext.json" - - textFile := "sample.text" - jsonFile := "sample.json" - textFile2 := "sample2.text" - jsonFile2 := "sample2.json" - - os.Remove(textFile) - os.Remove(jsonFile) - os.Remove(textFile2) - os.Remove(jsonFile2) - - var err error - - // Run the compiler once. - command := exec.Command( - "gnostic", - "--text-out="+textFile, - "--json-out="+jsonFile, - inputFile) - _, err = command.Output() - if err != nil { - t.Logf("Compile failed for command %v: %+v", command, err) - t.FailNow() - } - - // Run the compiler again, this time on the generated output. - command = exec.Command( - "gnostic", - "--text-out="+textFile2, - "--json-out="+jsonFile2, - jsonFile) - _, err = command.Output() - if err != nil { - t.Logf("Compile failed for command %v: %+v", command, err) - t.FailNow() - } - - // Verify that both models have the same internal representation. - err = exec.Command("diff", textFile, textFile2).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(textFile) - os.Remove(jsonFile) - os.Remove(textFile2) - os.Remove(jsonFile2) - } -} - -func TestYAMLOutput(t *testing.T) { - inputFile := "test/library-example-with-ext.json" - - textFile := "sample.text" - yamlFile := "sample.yaml" - textFile2 := "sample2.text" - yamlFile2 := "sample2.yaml" - - os.Remove(textFile) - os.Remove(yamlFile) - os.Remove(textFile2) - os.Remove(yamlFile2) - - var err error - - // Run the compiler once. - command := exec.Command( - "gnostic", - "--text-out="+textFile, - "--yaml-out="+yamlFile, - inputFile) - _, err = command.Output() - if err != nil { - t.Logf("Compile failed for command %v: %+v", command, err) - t.FailNow() - } - - // Run the compiler again, this time on the generated output. - command = exec.Command( - "gnostic", - "--text-out="+textFile2, - "--yaml-out="+yamlFile2, - yamlFile) - _, err = command.Output() - if err != nil { - t.Logf("Compile failed for command %v: %+v", command, err) - t.FailNow() - } - - // Verify that both models have the same internal representation. 
- err = exec.Command("diff", textFile, textFile2).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } else { - // if the test succeeded, clean up - os.Remove(textFile) - os.Remove(yamlFile) - os.Remove(textFile2) - os.Remove(yamlFile2) - } -} - -func testBuilder(version string, t *testing.T) { - var err error - - pbFile := "petstore-" + version + ".pb" - yamlFile := "petstore.yaml" - jsonFile := "petstore.json" - textFile := "petstore.text" - textReference := "test/" + version + ".0/petstore.text" - - os.Remove(pbFile) - os.Remove(textFile) - os.Remove(yamlFile) - os.Remove(jsonFile) - - // Generate petstore.pb. - command := exec.Command( - "petstore-builder", - "--"+version) - _, err = command.Output() - if err != nil { - t.Logf("Command %v failed: %+v", command, err) - t.FailNow() - } - - // Convert petstore.pb to yaml and json. - command = exec.Command( - "gnostic", - pbFile, - "--json-out="+jsonFile, - "--yaml-out="+yamlFile) - _, err = command.Output() - if err != nil { - t.Logf("Command %v failed: %+v", command, err) - t.FailNow() - } - - // Read petstore.yaml, resolve references, and export text. - command = exec.Command( - "gnostic", - yamlFile, - "--resolve-refs", - "--text-out="+textFile) - _, err = command.Output() - if err != nil { - t.Logf("Command %v failed: %+v", command, err) - t.FailNow() - } - - // Verify that the generated text matches our reference. - err = exec.Command("diff", textFile, textReference).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } - - // Read petstore.json, resolve references, and export text. - command = exec.Command( - "gnostic", - jsonFile, - "--resolve-refs", - "--text-out="+textFile) - _, err = command.Output() - if err != nil { - t.Logf("Command %v failed: %+v", command, err) - t.FailNow() - } - - // Verify that the generated text matches our reference. - err = exec.Command("diff", textFile, textReference).Run() - if err != nil { - t.Logf("Diff failed: %+v", err) - t.FailNow() - } - - // if the test succeeded, clean up - os.Remove(pbFile) - os.Remove(textFile) - os.Remove(yamlFile) - os.Remove(jsonFile) -} - -func TestBuilderV2(t *testing.T) { - testBuilder("v2", t) -} - -func TestBuilderV3(t *testing.T) { - testBuilder("v3", t) -} - -// OpenAPI 3.0 tests - -func TestPetstoreYAML_30(t *testing.T) { - testNormal(t, - "examples/v3.0/yaml/petstore.yaml", - "test/v3.0/petstore.text") -} - -func TestPetstoreJSON_30(t *testing.T) { - testNormal(t, - "examples/v3.0/json/petstore.json", - "test/v3.0/petstore.text") -} diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore index df9048a010..dd91ed2055 100644 --- a/vendor/github.com/gophercloud/gophercloud/.gitignore +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -1,2 +1,3 @@ **/*.swp .idea +.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml index 59c4194952..9153a00fc5 100644 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -1,21 +1,25 @@ language: go sudo: false install: -- go get golang.org/x/crypto/ssh -- go get -v -tags 'fixtures acceptance' ./... 
-- go get github.com/wadey/gocovmerge -- go get github.com/mattn/goveralls -- go get golang.org/x/tools/cmd/goimports +- GO111MODULE=off go get golang.org/x/crypto/ssh +- GO111MODULE=off go get -v -tags 'fixtures acceptance' ./... +- GO111MODULE=off go get github.com/wadey/gocovmerge +- GO111MODULE=off go get github.com/mattn/goveralls +- GO111MODULE=off go get golang.org/x/tools/cmd/goimports go: -- 1.8 -- tip +- "1.10" +- "1.11" +- "1.12" +- "tip" env: global: - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" + - GO111MODULE=on before_script: - go vet ./... script: - ./script/coverage +- ./script/unittest - ./script/format after_success: - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml index c259d03e18..776ed5a796 100644 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -1,12 +1,102 @@ +- job: + name: gophercloud-unittest + parent: golang-test + description: | + Run gophercloud unit test + run: .zuul/playbooks/gophercloud-unittest/run.yaml + nodeset: ubuntu-xenial-ut + +- job: + name: gophercloud-acceptance-test + parent: golang-test + description: | + Run gophercloud acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml + +- job: + name: gophercloud-acceptance-test-ironic + parent: golang-test + description: | + Run gophercloud ironic acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml + +- job: + name: gophercloud-acceptance-test-queens + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on queens branch + vars: + global_env: + OS_BRANCH: stable/queens + +- job: + name: gophercloud-acceptance-test-rocky + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on rocky branch + vars: + global_env: + OS_BRANCH: stable/rocky + +- job: + name: gophercloud-acceptance-test-pike + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on pike branch + vars: + global_env: + OS_BRANCH: stable/pike + +- job: + name: gophercloud-acceptance-test-ocata + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on ocata branch + vars: + global_env: + OS_BRANCH: stable/ocata + +- job: + name: gophercloud-acceptance-test-newton + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on newton branch + vars: + global_env: + OS_BRANCH: stable/newton + +- job: + name: gophercloud-acceptance-test-mitaka + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on mitaka branch + vars: + global_env: + OS_BRANCH: 
stable/mitaka + nodeset: ubuntu-trusty + - project: name: gophercloud/gophercloud check: jobs: - gophercloud-unittest - gophercloud-acceptance-test + - gophercloud-acceptance-test-ironic recheck-mitaka: jobs: - gophercloud-acceptance-test-mitaka + recheck-newton: + jobs: + - gophercloud-acceptance-test-newton + recheck-ocata: + jobs: + - gophercloud-acceptance-test-ocata recheck-pike: jobs: - gophercloud-acceptance-test-pike + recheck-queens: + jobs: + - gophercloud-acceptance-test-queens + recheck-rocky: + jobs: + - gophercloud-acceptance-test-rocky diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md b/vendor/github.com/gophercloud/gophercloud/FAQ.md deleted file mode 100644 index 88a366a288..0000000000 --- a/vendor/github.com/gophercloud/gophercloud/FAQ.md +++ /dev/null @@ -1,148 +0,0 @@ -# Tips - -## Implementing default logging and re-authentication attempts - -You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client -like the following and setting it as the provider client's HTTP Client (via the -`gophercloud.ProviderClient.HTTPClient` field): - -```go -//... - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default Gophercloud RoundTripper to allow for logging. -type LogRoundTripper struct { - rt http.RoundTripper - numReauthAttempts int -} - -// newHTTPClient return a custom HTTP client that allows for logging relevant -// information before and after the HTTP request. -func newHTTPClient() http.Client { - return http.Client{ - Transport: &LogRoundTripper{ - rt: http.DefaultTransport, - }, - } -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. -func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - glog.Infof("Request URL: %s\n", request.URL) - - response, err := lrt.rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if response.StatusCode == http.StatusUnauthorized { - if lrt.numReauthAttempts == 3 { - return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") - } - lrt.numReauthAttempts++ - } - - glog.Debugf("Response Status: %s\n", response.Status) - - return response, nil -} - -endpoint := "https://127.0.0.1/auth" -pc := openstack.NewClient(endpoint) -pc.HTTPClient = newHTTPClient() - -//... -``` - - -## Implementing custom objects - -OpenStack request/response objects may differ among variable names or types. - -### Custom request objects - -To pass custom options to a request, implement the desired `OptsBuilder` interface. For -example, to pass in - -```go -type MyCreateServerOpts struct { - Name string - Size int -} -``` - -to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface: - -```go -func (o MyCreateServeropts) ToServerCreateMap() (map[string]interface{}, error) { - return map[string]interface{}{ - "name": o.Name, - "size": o.Size, - }, nil -} -``` - -create an instance of your custom options object, and pass it to `servers.Create`: - -```go -// ... -myOpts := MyCreateServerOpts{ - Name: "s1", - Size: "100", -} -server, err := servers.Create(computeClient, myOpts).Extract() -// ... -``` - -### Custom response objects - -Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be -combined to create a custom object: - -```go -// ... 
-type MyVolume struct { - volumes.Volume - tenantattr.VolumeExt -} - -var v struct { - MyVolume `json:"volume"` -} - -err := volumes.Get(client, volID).ExtractInto(&v) -// ... -``` - -## Overriding default `UnmarshalJSON` method - -For some response objects, a field may be a custom type or may be allowed to take on -different types. In these cases, overriding the default `UnmarshalJSON` method may be -necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON` -method on the type: - -```go -// ... -type MyVolume struct { - ID string `json: "id"` - TimeCreated time.Time `json: "-"` -} - -func (r *MyVolume) UnmarshalJSON(b []byte) error { - type tmp MyVolume - var s struct { - tmp - TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.TimeCreated = time.Time(s.CreatedAt) - - return err -} -// ... -``` diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md deleted file mode 100644 index aa383c9cc9..0000000000 --- a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md +++ /dev/null @@ -1,32 +0,0 @@ -# Compute - -## Floating IPs - -* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` -* `floatingips.Associate` and `floatingips.Disassociate` have been removed. -* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. - -## Security Groups - -* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. -* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. - -## Servers - -* `servers.Reboot` now requires a `servers.RebootOpts` struct: - - ```golang - rebootOpts := &servers.RebootOpts{ - Type: servers.SoftReboot, - } - res := servers.Reboot(client, server.ID, rebootOpts) - ``` - -# Identity - -## V3 - -### Tokens - -* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of - `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index 60ca479de8..ad29041d9b 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -127,7 +127,7 @@ new resource in the `server` variable (a ## Advanced Usage -Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works. +Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. ## Backwards-Compatibility Guarantees @@ -140,4 +140,20 @@ See the [contributing guide](./.github/CONTRIBUTING.md). ## Help and feedback If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](/issues). +to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. + +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. 
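The FAQ removed above closes with an `UnmarshalJSON` override whose final lines do not compile as written: `*r = Volume(s.tmp)` casts to an undefined type (it should be `MyVolume`), and the parsed field is `s.TimeCreated`, not `s.CreatedAt`. A self-contained sketch of the same pattern, corrected and using the `gophercloud.JSONRFC3339MilliNoZ` wrapper the FAQ itself referenced (the `created_at` payload below is illustrative), would be:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/gophercloud/gophercloud"
)

// MyVolume overrides the default UnmarshalJSON to parse a
// non-RFC3339 timestamp into a plain time.Time field.
type MyVolume struct {
	ID          string    `json:"id"`
	TimeCreated time.Time `json:"-"`
}

func (r *MyVolume) UnmarshalJSON(b []byte) error {
	// The alias has the same fields but none of the methods,
	// so the inner json.Unmarshal does not recurse into us.
	type tmp MyVolume
	var s struct {
		tmp
		TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
	}
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	*r = MyVolume(s.tmp) // the deleted FAQ cast to Volume here
	r.TimeCreated = time.Time(s.TimeCreated)
	return nil
}

func main() {
	var v MyVolume
	payload := []byte(`{"id":"v1","created_at":"2019-03-01T12:00:00.000000"}`)
	if err := json.Unmarshal(payload, &v); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v.ID, v.TimeCreated)
}
```

The `type tmp MyVolume` alias is what breaks the recursion, and shadowing the embedded `TimeCreated` with the wrapper type lets the default decoder handle the raw string before it is converted back to `time.Time`.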
diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md deleted file mode 100644 index e7531a83d9..0000000000 --- a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md +++ /dev/null @@ -1,74 +0,0 @@ - -## On Pull Requests - -- Before you start a PR there needs to be a Github issue and a discussion about it - on that issue with a core contributor, even if it's just a 'SGTM'. - -- A PR's description must reference the issue it closes with a `For ` (e.g. For #293). - -- A PR's description must contain link(s) to the line(s) in the OpenStack - source code (on Github) that prove(s) the PR code to be valid. Links to documentation - are not good enough. The link(s) should be to a non-`master` branch. For example, - a pull request implementing the creation of a Neutron v2 subnet might put the - following link in the description: - - https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 - - From that link, a reviewer (or user) can verify the fields in the request/response - objects in the PR. - -- A PR that is in-progress should have `[wip]` in front of the PR's title. When - ready for review, remove the `[wip]` and ping a core contributor with an `@`. - -- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with - one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] - prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the - [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will - let reviewers know it is ready to review. - -- A PR should be small. Even if you intend on implementing an entire - service, a PR should only be one route of that service - (e.g. create server or get server, but not both). - -- Unless explicitly asked, do not squash commits in the middle of a review; only - append. It makes it difficult for the reviewer to see what's changed from one - review to the next. - -## On Code - -- In re design: follow as closely as is reasonable the code already in the library. - Most operations (e.g. create, delete) admit the same design. - -- Unit tests and acceptance (integration) tests must be written to cover each PR. - Tests for operations with several options (e.g. list, create) should include all - the options in the tests. This will allow users to verify an operation on their - own infrastructure and see an example of usage. - -- If in doubt, ask in-line on the PR. - -### File Structure - -- The following should be used in most cases: - - - `requests.go`: contains all the functions that make HTTP requests and the - types associated with the HTTP request (parameters for URL, body, etc) - - `results.go`: contains all the response objects and their methods - - `urls.go`: contains the endpoints to which the requests are made - -### Naming - -- For methods on a type in `results.go`, the receiver should be named `r` and the - variable into which it will be unmarshalled `s`. - -- Functions in `requests.go`, with the exception of functions that return a - `pagination.Pager`, should be named returns of the name `r`. - -- Functions in `requests.go` that accept request bodies should accept as their - last parameter an `interface` named `OptsBuilder` (eg `CreateOptsBuilder`). - This `interface` should have at the least a method named `ToMap` - (eg `ToPortCreateMap`). 
-
-- Functions in `requests.go` that accept query strings should accept as their
-  last parameter an `interface` named `<Action>OptsBuilder` (eg `ListOptsBuilder`).
-  This `interface` should have at the least a method named `To<Resource>ListQuery`
-  (eg `ToServerListQuery`).
diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go
index 4211470020..5ffa8d1e0a 100644
--- a/vendor/github.com/gophercloud/gophercloud/auth_options.go
+++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go
@@ -81,6 +81,23 @@ type AuthOptions struct {
 	// TokenID allows users to authenticate (possibly as another user) with an
 	// authentication token ID.
 	TokenID string `json:"-"`
+
+	// Scope determines the scoping of the authentication request.
+	Scope *AuthScope `json:"-"`
+
+	// Authentication through Application Credentials requires supplying name, project and secret
+	// For project we can use TenantID
+	ApplicationCredentialID     string `json:"-"`
+	ApplicationCredentialName   string `json:"-"`
+	ApplicationCredentialSecret string `json:"-"`
+}
+
+// AuthScope allows a created token to be limited to a specific domain or project.
+type AuthScope struct {
+	ProjectID   string
+	ProjectName string
+	DomainID    string
+	DomainName  string
 }
 
 // ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder
@@ -131,7 +148,7 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s
 	type userReq struct {
 		ID       *string    `json:"id,omitempty"`
 		Name     *string    `json:"name,omitempty"`
-		Password string     `json:"password"`
+		Password string     `json:"password,omitempty"`
 		Domain   *domainReq `json:"domain,omitempty"`
 	}
 
@@ -143,10 +160,18 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s
 		ID string `json:"id"`
 	}
 
+	type applicationCredentialReq struct {
+		ID     *string  `json:"id,omitempty"`
+		Name   *string  `json:"name,omitempty"`
+		User   *userReq `json:"user,omitempty"`
+		Secret *string  `json:"secret,omitempty"`
+	}
+
 	type identityReq struct {
-		Methods  []string     `json:"methods"`
-		Password *passwordReq `json:"password,omitempty"`
-		Token    *tokenReq    `json:"token,omitempty"`
+		Methods               []string                  `json:"methods"`
+		Password              *passwordReq              `json:"password,omitempty"`
+		Token                 *tokenReq                 `json:"token,omitempty"`
+		ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"`
 	}
 
 	type authReq struct {
@@ -183,8 +208,68 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s
 		req.Auth.Identity.Token = &tokenReq{
 			ID: opts.TokenID,
 		}
+
+	} else if opts.ApplicationCredentialID != "" {
+		// Configure the request for ApplicationCredentialID authentication.
+		// https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67
+		// There are three kinds of possible application_credential requests
+		// 1. application_credential id + secret
+		// 2. application_credential name + secret + user_id
+		// 3.
application_credential name + secret + username + domain_id / domain_name + if opts.ApplicationCredentialSecret == "" { + return nil, ErrAppCredMissingSecret{} + } + req.Auth.Identity.Methods = []string{"application_credential"} + req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ + ID: &opts.ApplicationCredentialID, + Secret: &opts.ApplicationCredentialSecret, + } + } else if opts.ApplicationCredentialName != "" { + if opts.ApplicationCredentialSecret == "" { + return nil, ErrAppCredMissingSecret{} + } + + var userRequest *userReq + + if opts.UserID != "" { + // UserID could be used without the domain information + userRequest = &userReq{ + ID: &opts.UserID, + } + } + + if userRequest == nil && opts.Username == "" { + // Make sure that Username or UserID are provided + return nil, ErrUsernameOrUserID{} + } + + if userRequest == nil && opts.DomainID != "" { + userRequest = &userReq{ + Name: &opts.Username, + Domain: &domainReq{ID: &opts.DomainID}, + } + } + + if userRequest == nil && opts.DomainName != "" { + userRequest = &userReq{ + Name: &opts.Username, + Domain: &domainReq{Name: &opts.DomainName}, + } + } + + // Make sure that DomainID or DomainName are provided among Username + if userRequest == nil { + return nil, ErrDomainIDOrDomainName{} + } + + req.Auth.Identity.Methods = []string{"application_credential"} + req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ + Name: &opts.ApplicationCredentialName, + User: userRequest, + Secret: &opts.ApplicationCredentialSecret, + } } else { - // If no password or token ID are available, authentication can't continue. + // If no password or token ID or ApplicationCredential are available, authentication can't continue. return nil, ErrMissingPassword{} } } else { @@ -263,85 +348,83 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s } func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - - var scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - } - - if opts.TenantID != "" { - scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - scope.ProjectName = opts.TenantName - scope.DomainID = opts.DomainID - scope.DomainName = opts.DomainName + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } } } - if scope.ProjectName != "" { + if opts.Scope.ProjectName != "" { // ProjectName provided: either DomainID or DomainName must also be supplied. // ProjectID may not be supplied. 
- if scope.DomainID == "" && scope.DomainName == "" { + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { return nil, ErrScopeDomainIDOrDomainName{} } - if scope.ProjectID != "" { + if opts.Scope.ProjectID != "" { return nil, ErrScopeProjectIDOrProjectName{} } - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { // ProjectName + DomainID return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"id": &scope.DomainID}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, }, }, nil } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { // ProjectName + DomainName return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"name": &scope.DomainName}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, }, }, nil } - } else if scope.ProjectID != "" { + } else if opts.Scope.ProjectID != "" { // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { return nil, ErrScopeProjectIDAlone{} } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeProjectIDAlone{} } // ProjectID return map[string]interface{}{ "project": map[string]interface{}{ - "id": &scope.ProjectID, + "id": &opts.Scope.ProjectID, }, }, nil - } else if scope.DomainID != "" { + } else if opts.Scope.DomainID != "" { // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeDomainIDOrDomainName{} } // DomainID return map[string]interface{}{ "domain": map[string]interface{}{ - "id": &scope.DomainID, + "id": &opts.Scope.DomainID, }, }, nil - } else if scope.DomainName != "" { + } else if opts.Scope.DomainName != "" { // DomainName return map[string]interface{}{ "domain": map[string]interface{}{ - "name": &scope.DomainName, + "name": &opts.Scope.DomainName, }, }, nil } diff --git a/vendor/github.com/gophercloud/gophercloud/auth_result.go b/vendor/github.com/gophercloud/gophercloud/auth_result.go new file mode 100644 index 0000000000..2e4699b978 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_result.go @@ -0,0 +1,52 @@ +package gophercloud + +/* +AuthResult is the result from the request that was used to obtain a provider +client's Keystone token. It is returned from ProviderClient.GetAuthResult(). + +The following types satisfy this interface: + + github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult + github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult + +Usage example: + + import ( + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + ) + + func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { + r := providerClient.GetAuthResult() + if r == nil { + //ProviderClient did not use openstack.Authenticate(), e.g. 
because token
+		//was set manually with ProviderClient.SetToken()
+		return "", errors.New("no AuthResult available")
+	}
+	switch r := r.(type) {
+	case tokens2.CreateResult:
+		u, err := r.ExtractUser()
+		if err != nil {
+			return "", err
+		}
+		return u.ID, nil
+	case tokens3.CreateResult:
+		u, err := r.ExtractUser()
+		if err != nil {
+			return "", err
+		}
+		return u.ID, nil
+	default:
+		panic(fmt.Sprintf("got unexpected AuthResult type %T", r))
+	}
+	}
+
+Both implementing types share a lot of methods by name, like ExtractUser() in
+this example. But those methods cannot be part of the AuthResult interface
+because the return types are different (in this case, type tokens2.User vs.
+type tokens3.User).
+*/
+type AuthResult interface {
+	ExtractTokenID() (string, error)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go
index 30067aa352..953ca822a9 100644
--- a/vendor/github.com/gophercloud/gophercloud/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/doc.go
@@ -9,20 +9,37 @@ Provider structs represent the cloud providers that offer and manage a
 collection of services. You will generally want to create one Provider
 client per OpenStack cloud.
 
+	It is now recommended to use the `clientconfig` package found at
+	https://github.com/gophercloud/utils/tree/master/openstack/clientconfig
+	for all authentication purposes.
+
+	The below documentation is still relevant. clientconfig simply implements
+	the below and presents it in an easier and more flexible way.
+
 Use your OpenStack credentials to create a Provider client. The
 IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in
 information provided by the cloud operator. Additionally, the cloud may refer to
 TenantID or TenantName as project_id and project_name. Credentials are
 specified like so:
 
-	opts := gophercloud.AuthOptions{
-		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
-		Username: "{username}",
-		Password: "{password}",
-		TenantID: "{tenant_id}",
-	}
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		Username: "{username}",
+		Password: "{password}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
 
-	provider, err := openstack.AuthenticatedClient(opts)
+You can authenticate with a token by doing:
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		TokenID: "{token_id}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
 
 You may also use the openstack.AuthOptionsFromEnv() helper function. This
 function reads in standard environment variables frequently found in an
@@ -39,16 +56,16 @@ operations for a particular OpenStack service. Examples of services include:
 Compute, Object Storage, Block Storage.
In order to define one, you need to pass in the parent provider, like so: - opts := gophercloud.EndpointOpts{Region: "RegionOne"} + opts := gophercloud.EndpointOpts{Region: "RegionOne"} - client := openstack.NewComputeV2(provider, opts) + client, err := openstack.NewComputeV2(provider, opts) Resources Resource structs are the domain models that services make use of in order to work with and represent the state of API resources: - server, err := servers.Get(client, "{serverId}").Extract() + server, err := servers.Get(client, "{serverId}").Extract() Intermediate Result structs are returned for API operations, which allow generic access to the HTTP headers, response body, and any errors associated @@ -56,11 +73,11 @@ with the network transaction. To turn a result into a usable resource struct, you must call the Extract method which is chained to the response, or an Extract function from an applicable extension: - result := servers.Get(client, "{serverId}") + result := servers.Get(client, "{serverId}") - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) All requests that enumerate a collection return a Pager struct that is used to iterate through the results one page at a time. Use the EachPage method on that @@ -68,17 +85,17 @@ Pager to handle each successive Page in a closure, then use the appropriate extraction method from that request's package to interpret that Page as a slice of results: - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } - // Handle the []servers.Server slice. + // Handle the []servers.Server slice. - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) If you want to obtain the entire collection of pages without doing any intermediary processing on each page, you can use the AllPages method: diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index 88fd2ac676..0bcb3af7f0 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -1,6 +1,9 @@ package gophercloud -import "fmt" +import ( + "fmt" + "strings" +) // BaseError is an error type that all other error types embed. 
type BaseError struct {
@@ -43,6 +46,33 @@ func (e ErrInvalidInput) Error() string {
 	return e.choseErrString()
 }
 
+// ErrMissingEnvironmentVariable is the error when an environment variable is required
+// in a particular situation but not provided by the user
+type ErrMissingEnvironmentVariable struct {
+	BaseError
+	EnvironmentVariable string
+}
+
+func (e ErrMissingEnvironmentVariable) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable)
+	return e.choseErrString()
+}
+
+// ErrMissingAnyoneOfEnvironmentVariables is the error when any one of the environment variables
+// is required in a particular situation but not provided by the user
+type ErrMissingAnyoneOfEnvironmentVariables struct {
+	BaseError
+	EnvironmentVariables []string
+}
+
+func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Missing one of the following environment variables [%s]",
+		strings.Join(e.EnvironmentVariables, ", "),
+	)
+	return e.choseErrString()
+}
+
 // ErrUnexpectedResponseCode is returned by the Request method when a response code other than
 // those listed in OkCodes is encountered.
 type ErrUnexpectedResponseCode struct {
@@ -72,6 +102,11 @@ type ErrDefault401 struct {
 	ErrUnexpectedResponseCode
 }
 
+// ErrDefault403 is the default error type returned on a 403 HTTP response code.
+type ErrDefault403 struct {
+	ErrUnexpectedResponseCode
+}
+
 // ErrDefault404 is the default error type returned on a 404 HTTP response code.
 type ErrDefault404 struct {
 	ErrUnexpectedResponseCode
@@ -87,6 +122,11 @@ type ErrDefault408 struct {
 	ErrUnexpectedResponseCode
 }
 
+// ErrDefault409 is the default error type returned on a 409 HTTP response code.
+type ErrDefault409 struct {
+	ErrUnexpectedResponseCode
+}
+
 // ErrDefault429 is the default error type returned on a 429 HTTP response code.
 type ErrDefault429 struct {
 	ErrUnexpectedResponseCode
@@ -103,11 +143,22 @@ type ErrDefault503 struct {
 }
 
 func (e ErrDefault400) Error() string {
-	return "Invalid request due to incorrect syntax or missing required parameters."
+	e.DefaultErrString = fmt.Sprintf(
+		"Bad request with: [%s %s], error message: %s",
+		e.Method, e.URL, e.Body,
+	)
+	return e.choseErrString()
 }
 func (e ErrDefault401) Error() string {
 	return "Authentication failed"
 }
+func (e ErrDefault403) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Request forbidden: [%s %s], error message: %s",
+		e.Method, e.URL, e.Body,
+	)
+	return e.choseErrString()
+}
 func (e ErrDefault404) Error() string {
 	return "Resource not found"
 }
@@ -141,6 +192,12 @@ type Err401er interface {
 	Error401(ErrUnexpectedResponseCode) error
 }
 
+// Err403er is the interface resource error types implement to override the error message
+// from a 403 error.
+type Err403er interface {
+	Error403(ErrUnexpectedResponseCode) error
+}
+
 // Err404er is the interface resource error types implement to override the error message
 // from a 404 error.
 type Err404er interface {
@@ -159,6 +216,12 @@ type Err408er interface {
 	Error408(ErrUnexpectedResponseCode) error
 }
 
+// Err409er is the interface resource error types implement to override the error message
+// from a 409 error.
+type Err409er interface {
+	Error409(ErrUnexpectedResponseCode) error
+}
+
 // Err429er is the interface resource error types implement to override the error message
 // from a 429 error.
type Err429er interface {
 	Error429(ErrUnexpectedResponseCode) error
 }
@@ -399,3 +462,10 @@ type ErrScopeEmpty struct{ BaseError }
 func (e ErrScopeEmpty) Error() string {
 	return "You must provide either a Project or Domain in a Scope"
 }
+
+// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name
+type ErrAppCredMissingSecret struct{ BaseError }
+
+func (e ErrAppCredMissingSecret) Error() string {
+	return "You must provide an Application Credential Secret"
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod
new file mode 100644
index 0000000000..d1ee3b472e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/go.mod
@@ -0,0 +1,7 @@
+module github.com/gophercloud/gophercloud
+
+require (
+	golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
+	golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect
+	gopkg.in/yaml.v2 v2.2.2
+)
diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum
new file mode 100644
index 0000000000..33cb0be8aa
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/go.sum
@@ -0,0 +1,8 @@
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
index 95286041d6..0e8d90ff82 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
@@ -13,10 +13,19 @@ AuthOptionsFromEnv fills out an identity.AuthOptions structure with the
 settings found on the various OpenStack OS_* environment variables.
 
 The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME,
-OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME.
+OS_PASSWORD and OS_PROJECT_ID.
 
 Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings,
-or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional.
+or an error will result. OS_PROJECT_ID is optional.
+
+OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and
+OS_PROJECT_NAME, and the latter are expected against a v3 auth API.
+
+If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred
+to as "tenant" in Gophercloud.
+
+If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to
+handle projects not on the default domain.
To use this function, first set the OS_* environment variables (for example, by sourcing an `openrc` file), then: @@ -33,31 +42,83 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { tenantName := os.Getenv("OS_TENANT_NAME") domainID := os.Getenv("OS_DOMAIN_ID") domainName := os.Getenv("OS_DOMAIN_NAME") + applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID") + applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME") + applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET") + + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value. + if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } if authURL == "" { - err := gophercloud.ErrMissingInput{Argument: "authURL"} + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } return nilOptions, err } - if username == "" && userID == "" { - err := gophercloud.ErrMissingInput{Argument: "username"} + if userID == "" && username == "" { + // Empty username and userID could be ignored, when applicationCredentialID and applicationCredentialSecret are set + if applicationCredentialID == "" && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + return nilOptions, err + } + } + + if password == "" && applicationCredentialID == "" && applicationCredentialName == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } return nilOptions, err } - if password == "" { - err := gophercloud.ErrMissingInput{Argument: "password"} + if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", + } return nilOptions, err } + if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PROJECT_ID", + } + return nilOptions, err + } + + if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { + if userID == "" && username == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + } + if username != "" && domainID == "" && domainName == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, + } + } + } + ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + ApplicationCredentialID: applicationCredentialID, + ApplicationCredentialName: applicationCredentialName, + ApplicationCredentialSecret: applicationCredentialSecret, } return ao, nil diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index adcf90bcef..50f239711e 100644 --- 
a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -2,10 +2,7 @@ package openstack import ( "fmt" - "net/url" "reflect" - "regexp" - "strings" "github.com/gophercloud/gophercloud" tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" @@ -38,29 +35,20 @@ A basic example of using this would be: client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) */ func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - u, err := url.Parse(endpoint) + base, err := utils.BaseEndpoint(endpoint) if err != nil { return nil, err } - u.RawQuery, u.Fragment = "", "" - - var base string - versionRe := regexp.MustCompile("v[0-9.]+/?") - if version := versionRe.FindString(u.Path); version != "" { - base = strings.Replace(u.String(), version, "", -1) - } else { - base = u.String() - } - endpoint = gophercloud.NormalizeURL(endpoint) base = gophercloud.NormalizeURL(base) - return &gophercloud.ProviderClient{ - IdentityBase: base, - IdentityEndpoint: endpoint, - }, nil + p := new(gophercloud.ProviderClient) + p.IdentityBase = base + p.IdentityEndpoint = endpoint + p.UseTokenLock() + return p, nil } /* @@ -147,7 +135,7 @@ func v2auth(client *gophercloud.ProviderClient, endpoint string, options gopherc result := tokens2.Create(v2Client, v2Opts) - token, err := result.ExtractToken() + err = client.SetTokenAndAuthResult(result) if err != nil { return err } @@ -158,12 +146,24 @@ func v2auth(client *gophercloud.ProviderClient, endpoint string, options gopherc } if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + tao := options + tao.AllowReauth = false client.ReauthFunc = func() error { - client.TokenID = "" - return v2auth(client, endpoint, options, eo) + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil } } - client.TokenID = token.ID client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { return V2EndpointURL(catalog, opts) } @@ -189,7 +189,7 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.Au result := tokens3.Create(v3Client, opts) - token, err := result.ExtractToken() + err = client.SetTokenAndAuthResult(result) if err != nil { return err } @@ -199,12 +199,34 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.Au return err } - client.TokenID = token.ID - if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. 
combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } client.ReauthFunc = func() error { - client.TokenID = "" - return v3auth(client, endpoint, opts, eo) + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { @@ -251,11 +273,17 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp // Ensure endpoint still has a suffix of v3. // This is because EndpointLocator might have found a versionless - // endpoint and requests will fail unless targeted at /v3. - if !strings.HasSuffix(endpoint, "v3/") { - endpoint = endpoint + "v3/" + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err } + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + return &gophercloud.ServiceClient{ ProviderClient: client, Endpoint: endpoint, @@ -276,6 +304,18 @@ func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointO return sc, nil } +// NewBareMetalV1 creates a ServiceClient that may be used with the v1 +// bare metal package. +func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal") +} + +// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 +// bare metal introspection package. +func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal-inspector") +} + // NewObjectStorageV1 creates a ServiceClient that may be used with the v1 // object storage package. func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { @@ -308,8 +348,12 @@ func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi return initClientOpts(client, eo, "volumev2") } -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the -// v2 shared file system service. +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "sharev2") } @@ -354,3 +398,41 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi sc.ResourceBase = sc.Endpoint + "v2.0/" return sc, err } + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. 
+func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. +func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} + +// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key +// manager service. +func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "key-manager") + sc.ResourceBase = sc.Endpoint + "v1/" + return sc, err +} + +// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management +// package. +func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container-infra") +} + +// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. +func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "workflowv2") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go index 070ea7cbef..12c8aebcf7 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go @@ -84,7 +84,7 @@ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpt return "", err } if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region) { + (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { endpoints = append(endpoints, endpoint) } } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go index 60f58c8ce3..f21a58f10c 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go @@ -85,7 +85,7 @@ type UpdateOpts struct { Name string `json:"name,omitempty"` // Description is the description of the tenant. - Description string `json:"description,omitempty"` + Description *string `json:"description,omitempty"` // Enabled sets the tenant status to enabled or disabled. 
Enabled *bool `json:"enabled,omitempty"` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go index b11326772b..ee5da37f46 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -135,6 +135,21 @@ func (r CreateResult) ExtractToken() (*Token, error) { }, nil } +// ExtractTokenID implements the gophercloud.AuthResult interface. The returned +// string is the same as the ID field of the Token struct returned from +// ExtractToken(). +func (r CreateResult) ExtractTokenID() (string, error) { + var s struct { + Access struct { + Token struct { + ID string `json:"id"` + } `json:"token"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return s.Access.Token.ID, err +} + // ExtractServiceCatalog returns the ServiceCatalog that was generated along // with the user's Token. func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go index ca35851e4a..2d20fa6f4b 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -52,19 +52,28 @@ type AuthOptions struct { // authentication token ID. TokenID string `json:"-"` + // Authentication through Application Credentials requires supplying name, project and secret + // For project we can use TenantID + ApplicationCredentialID string `json:"-"` + ApplicationCredentialName string `json:"-"` + ApplicationCredentialSecret string `json:"-"` + Scope Scope `json:"-"` } // ToTokenV3CreateMap builds a request body from AuthOptions. func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { gophercloudAuthOpts := gophercloud.AuthOptions{ - Username: opts.Username, - UserID: opts.UserID, - Password: opts.Password, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - AllowReauth: opts.AllowReauth, - TokenID: opts.TokenID, + Username: opts.Username, + UserID: opts.UserID, + Password: opts.Password, + DomainID: opts.DomainID, + DomainName: opts.DomainName, + AllowReauth: opts.AllowReauth, + TokenID: opts.TokenID, + ApplicationCredentialID: opts.ApplicationCredentialID, + ApplicationCredentialName: opts.ApplicationCredentialName, + ApplicationCredentialSecret: opts.ApplicationCredentialSecret, } return gophercloudAuthOpts.ToTokenV3CreateMap(scope) @@ -72,72 +81,15 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s // ToTokenV3CreateMap builds a scope request body from AuthOptions. func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. 
- if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, gophercloud.ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. - if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil + scope := gophercloud.AuthScope(opts.Scope) + + gophercloudAuthOpts := gophercloud.AuthOptions{ + Scope: &scope, + DomainID: opts.DomainID, + DomainName: opts.DomainName, } - return nil, nil + return gophercloudAuthOpts.ToTokenV3ScopeMap() } func (opts *AuthOptions) CanReauth() bool { @@ -190,7 +142,7 @@ func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { // Validate determines if a specified token is valid or not. func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{ + resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ MoreHeaders: subjectTokenHeaders(c, token), OkCodes: []int{200, 204, 404}, }) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go index 6e78d1cbdb..6f26c96bcd 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go @@ -13,6 +13,7 @@ import ( type Endpoint struct { ID string `json:"id"` Region string `json:"region"` + RegionID string `json:"region_id"` Interface string `json:"interface"` URL string `json:"url"` } @@ -101,6 +102,13 @@ func (r commonResult) ExtractToken() (*Token, error) { return &s, err } +// ExtractTokenID implements the gophercloud.AuthResult interface. The returned +// string is the same as the ID field of the Token struct returned from +// ExtractToken(). +func (r CreateResult) ExtractTokenID() (string, error) { + return r.Header.Get("X-Subject-Token"), r.Err +} + // ExtractServiceCatalog returns the ServiceCatalog that was generated along // with the user's Token. 
func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 0000000000..40080f7af2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,28 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", err + } + + u.RawQuery, u.Fragment = "", "" + + path := u.Path + versionRe := regexp.MustCompile("v[0-9.]+/?") + + if version := versionRe.FindString(path); version != "" { + versionIndex := strings.Index(path, version) + u.Path = path[:versionIndex] + } + + return u.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go index 7c65926b72..42c0b2dbe5 100644 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -41,6 +41,8 @@ type Pager struct { createPage func(r PageResult) Page + firstPage Page + Err error // Headers supplies additional HTTP headers to populate on each paged request. @@ -89,9 +91,18 @@ func (p Pager) EachPage(handler func(Page) (bool, error)) error { } currentURL := p.initialURL for { - currentPage, err := p.fetchNextPage(currentURL) - if err != nil { - return err + var currentPage Page + + // if first page has already been fetched, no need to fetch it again + if p.firstPage != nil { + currentPage = p.firstPage + p.firstPage = nil + } else { + var err error + currentPage, err = p.fetchNextPage(currentURL) + if err != nil { + return err + } } empty, err := currentPage.IsEmpty() @@ -128,23 +139,26 @@ func (p Pager) AllPages() (Page, error) { // body will contain the final concatenated Page body. var body reflect.Value - // Grab a test page to ascertain the page body type. - testPage, err := p.fetchNextPage(p.initialURL) + // Grab a first page to ascertain the page body type. + firstPage, err := p.fetchNextPage(p.initialURL) if err != nil { return nil, err } // Store the page type so we can use reflection to create a new mega-page of // that type. - pageType := reflect.TypeOf(testPage) + pageType := reflect.TypeOf(firstPage) - // if it's a single page, just return the testPage (first page) + // if it's a single page, just return the firstPage (first page) if _, found := pageType.FieldByName("SinglePageBase"); found { - return testPage, nil + return firstPage, nil } + // store the first page to avoid getting it twice + p.firstPage = firstPage + // Switch on the page body type. Recognized types are `map[string]interface{}`, // `[]byte`, and `[]interface{}`. - switch pb := testPage.GetBody().(type) { + switch pb := firstPage.GetBody().(type) { case map[string]interface{}: // key is the map key for the page body if the body type is `map[string]interface{}`. 
var key string diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 6afc8f8b72..b9986660cb 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -115,10 +115,31 @@ func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, } } + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + + if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { + sliceValue := v + if sliceValue.Kind() == reflect.Ptr { + sliceValue = sliceValue.Elem() + } + + for i := 0; i < sliceValue.Len(); i++ { + element := sliceValue.Index(i) + if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { + _, err := BuildRequestBody(element.Interface(), "") + if err != nil { + return nil, err + } + } + } + } if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { if zero { //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag := f.Tag.Get("json"); jsonTag != "" { + if jsonTag != "" { jsonTagPieces := strings.Split(jsonTag, ",") if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { if v.CanSet() { @@ -347,12 +368,20 @@ func BuildQueryString(opts interface{}) (*url.URL, error) { params.Add(tags[0], v.Index(i).String()) } } + case reflect.Map: + if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { + var s []string + for _, k := range v.MapKeys() { + value := v.MapIndex(k).String() + s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) + } + params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) + } } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) } } } @@ -425,10 +454,9 @@ func BuildHeaders(opts interface{}) (map[string]string, error) { optsMap[tags[0]] = strconv.FormatBool(v.Bool()) } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return optsMap, fmt.Errorf("Required header not set.") + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) } } } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 01b3010739..fce00462fd 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -2,11 +2,14 @@ package gophercloud import ( "bytes" + "context" "encoding/json" + "errors" "io" "io/ioutil" "net/http" "strings" + "sync" ) // DefaultUserAgent is the default User-Agent string set in the request header. @@ -51,6 +54,8 @@ type ProviderClient struct { IdentityEndpoint string // TokenID is the ID of the most recently issued valid token. 
+	// NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application.
+	// To safely read or write this value, call `Token` or `SetToken`, respectively
 	TokenID string
 
 	// EndpointLocator describes how this provider discovers the endpoints for
@@ -68,16 +73,192 @@ type ProviderClient struct {
 	// authentication functions for different Identity service versions.
 	ReauthFunc func() error
 
-	Debug bool
+	// Throwaway determines whether this client is a throw-away client. It's a copy of the user's provider client
+	// with the token and reauth func zeroed. Such a client can be used to perform reauthorization.
+	Throwaway bool
+
+	// Context is the context passed to the HTTP request.
+	Context context.Context
+
+	// mut is a mutex for the client. It protects read and write access to client attributes such as getting
+	// and setting the TokenID.
+	mut *sync.RWMutex
+
+	// reauthmut is a mutex for reauthentication; it attempts to ensure that only one reauthentication
+	// attempt happens at one time.
+	reauthmut *reauthlock
+
+	authResult AuthResult
+}
+
+// reauthlock represents a set of attributes used to help in the reauthentication process.
+type reauthlock struct {
+	sync.RWMutex
+	reauthing    bool
+	reauthingErr error
+	done         *sync.Cond
+}
 
 // AuthenticatedHeaders returns a map of HTTP headers that are common for all
-// authenticated service requests.
-func (client *ProviderClient) AuthenticatedHeaders() map[string]string {
-	if client.TokenID == "" {
-		return map[string]string{}
+// authenticated service requests. Blocks if Reauthenticate is in progress.
+func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) {
+	if client.IsThrowaway() {
+		return
+	}
+	if client.reauthmut != nil {
+		client.reauthmut.Lock()
+		for client.reauthmut.reauthing {
+			client.reauthmut.done.Wait()
+		}
+		client.reauthmut.Unlock()
+	}
+	t := client.Token()
+	if t == "" {
+		return
+	}
+	return map[string]string{"X-Auth-Token": t}
+}
+
+// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token.
+// If the application's ProviderClient is not used concurrently, this doesn't need to be called.
+func (client *ProviderClient) UseTokenLock() {
+	client.mut = new(sync.RWMutex)
+	client.reauthmut = new(reauthlock)
+}
+
+// GetAuthResult returns the result from the request that was used to obtain a
+// provider client's Keystone token.
+//
+// The result is nil when authentication has not yet taken place, when the token
+// was set manually with SetToken(), or when a ReauthFunc was used that does not
+// record the AuthResult.
+func (client *ProviderClient) GetAuthResult() AuthResult {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.authResult
+}
+
+// Token safely reads the value of the auth token from the ProviderClient. Applications should
+// call this method to access the token instead of the TokenID field.
+func (client *ProviderClient) Token() string {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.TokenID
+}
+
+// SetToken safely sets the value of the auth token in the ProviderClient. Applications may
+// use this method in a custom ReauthFunc.
+//
+// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead.
+func (client *ProviderClient) SetToken(t string) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
 	}
-	return map[string]string{"X-Auth-Token": client.TokenID}
+	client.TokenID = t
+	client.authResult = nil
+}
+
+// SetTokenAndAuthResult safely sets the value of the auth token in the
+// ProviderClient and also records the AuthResult that was returned from the
+// token creation request. Applications may call this in a custom ReauthFunc.
+func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error {
+	tokenID := ""
+	var err error
+	if r != nil {
+		tokenID, err = r.ExtractTokenID()
+		if err != nil {
+			return err
+		}
+	}
+
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	client.TokenID = tokenID
+	client.authResult = r
+	return nil
+}
+
+// CopyTokenFrom safely copies the token from another ProviderClient into
+// this one.
+func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	if other.mut != nil && other.mut != client.mut {
+		other.mut.RLock()
+		defer other.mut.RUnlock()
+	}
+	client.TokenID = other.TokenID
+	client.authResult = other.authResult
+}
+
+// IsThrowaway safely reads the value of the client Throwaway field.
+func (client *ProviderClient) IsThrowaway() bool {
+	if client.reauthmut != nil {
+		client.reauthmut.RLock()
+		defer client.reauthmut.RUnlock()
+	}
+	return client.Throwaway
+}
+
+// SetThrowaway safely sets the value of the client Throwaway field.
+func (client *ProviderClient) SetThrowaway(v bool) {
+	if client.reauthmut != nil {
+		client.reauthmut.Lock()
+		defer client.reauthmut.Unlock()
+	}
+	client.Throwaway = v
+}
+
+// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is
+// called because of a 401 response, the caller may pass the previous token. In
+// this case, the reauthentication can be skipped if another thread has already
+// reauthenticated in the meantime. If no previous token is known, an empty
+// string should be passed instead to force unconditional reauthentication.
+func (client *ProviderClient) Reauthenticate(previousToken string) (err error) {
+	if client.ReauthFunc == nil {
+		return nil
+	}
+
+	if client.reauthmut == nil {
+		return client.ReauthFunc()
+	}
+
+	client.reauthmut.Lock()
+	if client.reauthmut.reauthing {
+		for !client.reauthmut.reauthing {
+			client.reauthmut.done.Wait()
+		}
+		err = client.reauthmut.reauthingErr
+		client.reauthmut.Unlock()
+		return err
+	}
+	client.reauthmut.Unlock()
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = true
+	client.reauthmut.done = sync.NewCond(client.reauthmut)
+	client.reauthmut.reauthingErr = nil
+	client.reauthmut.Unlock()
+
+	if previousToken == "" || client.TokenID == previousToken {
+		err = client.ReauthFunc()
+	}
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = false
+	client.reauthmut.reauthingErr = err
+	client.reauthmut.done.Broadcast()
+	client.reauthmut.Unlock()
+	return
+}
 
 // RequestOpts customizes the behavior of the provider.Request() method.
@@ -116,7 +297,7 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts)
 	// io.ReadSeeker as-is. Default the content-type to application/json.
if options.JSONBody != nil { if options.RawBody != nil { - panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") + return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") } rendered, err := json.Marshal(options.JSONBody) @@ -137,6 +318,9 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if err != nil { return nil, err } + if client.Context != nil { + req = req.WithContext(client.Context) + } // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to // modify or omit any header. @@ -166,6 +350,8 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) // Set connection parameter to close the connection immediately when we've got the response req.Close = true + prereqtok := req.Header.Get("X-Auth-Token") + // Issue the request. resp, err := client.HTTPClient.Do(req) if err != nil { @@ -173,13 +359,14 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } // Allow default OkCodes if none explicitly set - if options.OkCodes == nil { - options.OkCodes = defaultOkCodes(method) + okc := options.OkCodes + if okc == nil { + okc = defaultOkCodes(method) } // Validate the HTTP response status. var ok bool - for _, code := range options.OkCodes { + for _, code := range okc { if resp.StatusCode == code { ok = true break @@ -189,9 +376,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if !ok { body, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() - //pc := make([]uintptr, 1) - //runtime.Callers(2, pc) - //f := runtime.FuncForPC(pc[0]) respErr := ErrUnexpectedResponseCode{ URL: url, Method: method, @@ -199,7 +383,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) Actual: resp.StatusCode, Body: body, } - //respErr.Function = "gophercloud.ProviderClient.Request" errType := options.ErrorContext switch resp.StatusCode { @@ -210,7 +393,7 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } case http.StatusUnauthorized: if client.ReauthFunc != nil { - err = client.ReauthFunc() + err = client.Reauthenticate(prereqtok) if err != nil { e := &ErrUnableToReauthenticate{} e.ErrOriginal = respErr @@ -240,6 +423,11 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if error401er, ok := errType.(Err401er); ok { err = error401er.Error401(respErr) } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } case http.StatusNotFound: err = ErrDefault404{respErr} if error404er, ok := errType.(Err404er); ok { @@ -255,6 +443,11 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if error408er, ok := errType.(Err408er); ok { err = error408er.Error408(respErr) } + case http.StatusConflict: + err = ErrDefault409{respErr} + if error409er, ok := errType.(Err409er); ok { + err = error409er.Error409(respErr) + } case 429: err = ErrDefault429{respErr} if error429er, ok := errType.(Err429er); ok { @@ -299,7 +492,7 @@ func defaultOkCodes(method string) []int { case method == "PUT": return []int{201, 202} case method == "PATCH": - return []int{200, 204} + return []int{200, 202, 204} case method == "DELETE": return []int{202, 204} } diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go index 
e64feee19e..94a16bff0b 100644 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -89,23 +89,47 @@ func (r Result) extractIntoPtr(to interface{}, label string) error { if typeOfV.Kind() == reflect.Struct { if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - newType := reflect.New(typeOfV).Elem() - for _, v := range m[label].([]interface{}) { - b, err := json.Marshal(v) - if err != nil { - return err - } - - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if mSlice, ok := m[label].([]interface{}); ok { + for _, v := range mSlice { + // For each iteration of the slice, we create a new struct. + // This is to work around a bug where elements of a slice + // are reused and not overwritten when the same copy of the + // struct is used: + // + // https://github.com/golang/go/issues/21092 + // https://github.com/golang/go/issues/24155 + // https://play.golang.org/p/NHo3ywlPZli + newType := reflect.New(typeOfV).Elem() + + b, err := json.Marshal(v) if err != nil { return err } + + // This is needed for structs with an UnmarshalJSON method. + // Technically this is just unmarshalling the response into + // a struct that is never used, but it's good enough to + // trigger the UnmarshalJSON method. + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + + // Unmarshal is used rather than NewDecoder to also work + // around the above-mentioned bug. + err = json.Unmarshal(b, s) + if err != nil { + return err + } + } + + newSlice = reflect.Append(newSlice, newType) } - newSlice = reflect.Append(newSlice, newType) } + + // "to" should now be properly modeled to receive the + // JSON response body and unmarshal into all the correct + // fields of the struct or composed extension struct + // at the end of this method. toValue.Set(newSlice) } } @@ -345,6 +369,48 @@ func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { return nil } +// RFC3339ZNoT is the time format used in Zun (Containers Service). +const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" + +type JSONRFC3339ZNoT time.Time + +func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoT, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoT(t) + return nil +} + +// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). +const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" + +type JSONRFC3339ZNoTNoZ time.Time + +func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoTNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoTNoZ(t) + return nil +} + /* Link is an internal type to be used in packages of collection resources that are paginated in a certain way. 
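Since results.go adds two new Zun time layouts above that are easy to misread, a self-contained check of what they accept (sample values invented): they are ordinary time.Parse reference layouts, RFC 3339 minus the 'T' separator and, for the second, minus the zone suffix.

package main

import (
	"fmt"
	"time"
)

func main() {
	// RFC3339ZNoT: space instead of 'T', explicit numeric offset.
	t1, err := time.Parse("2006-01-02 15:04:05-07:00", "2018-12-30 03:23:45+00:00")
	fmt.Println(t1, err)

	// RFC3339ZNoTNoZ: no separator and no offset; parsed as UTC.
	t2, err := time.Parse("2006-01-02 15:04:05", "2018-12-30 03:23:45")
	fmt.Println(t2, err)
}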
diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go index 1160fefa7c..f222f05a66 100644 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -28,6 +28,10 @@ type ServiceClient struct { // The microversion of the service to use. Set this to use a particular microversion. Microversion string + + // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, + // values set in this field will be set on all the HTTP requests the service client sends. + MoreHeaders map[string]string } // ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. @@ -108,15 +112,47 @@ func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Respon return client.Request("DELETE", url, opts) } +// Head calls `Request` with the "HEAD" HTTP verb. +func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("HEAD", url, opts) +} + func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { switch client.Type { case "compute": opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion case "sharev2": opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + case "baremetal": + opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion + case "baremetal-introspection": + opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion } if client.Type != "" { opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion } } + +// Request carries out the HTTP operation for the service client. +func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + if len(client.MoreHeaders) > 0 { + if options == nil { + options = new(RequestOpts) + } + // Guard against a nil map: a fresh RequestOpts does not initialize MoreHeaders. + if options.MoreHeaders == nil { + options.MoreHeaders = make(map[string]string) + } + for k, v := range client.MoreHeaders { + options.MoreHeaders[k] = v + } + } + return client.ProviderClient.Request(method, url, options) +} diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md index eb2eae8ede..09c9e7c173 100644 --- a/vendor/github.com/gregjones/httpcache/README.md +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -3,7 +3,7 @@ httpcache [![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) -Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. +Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
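As the README is the only context this vendor bump carries for httpcache, a short usage sketch may help reviewers. The URL is illustrative; whether the second request is served from cache depends on the server's caching headers, and a response is only cached once its body has been read, as the deleted tests below demonstrate.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// The cache is a drop-in http.RoundTripper.
	client := &http.Client{Transport: httpcache.NewMemoryCacheTransport()}

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		io.Copy(ioutil.Discard, resp.Body) // responses are cached only after the body is read
		resp.Body.Close()
		// httpcache sets the X-From-Cache header to "1" on cache hits.
		fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache))
	}
}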
diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go deleted file mode 100644 index 35c76cbd1c..0000000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package diskcache - -import ( - "bytes" - "io/ioutil" - "os" - "testing" -) - -func TestDiskCache(t *testing.T) { - tempDir, err := ioutil.TempDir("", "httpcache") - if err != nil { - t.Fatalf("TempDir: %v", err) - } - defer os.RemoveAll(tempDir) - - cache := New(tempDir) - - key := "testKey" - _, ok := cache.Get(key) - if ok { - t.Fatal("retrieved key before adding it") - } - - val := []byte("some bytes") - cache.Set(key, val) - - retVal, ok := cache.Get(key) - if !ok { - t.Fatal("could not retrieve an element we just added") - } - if !bytes.Equal(retVal, val) { - t.Fatal("retrieved a different value than what we put in") - } - - cache.Delete(key) - - _, ok = cache.Get(key) - if ok { - t.Fatal("deleted key still present") - } -} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index b26e167f0f..f6a2ec4a53 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -10,7 +10,6 @@ import ( "bufio" "bytes" "errors" - "fmt" "io" "io/ioutil" "net/http" @@ -193,16 +192,11 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error for _, header := range endToEndHeaders { cachedResp.Header[header] = resp.Header[header] } - cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) - cachedResp.StatusCode = http.StatusOK - resp = cachedResp } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { // In case of transport failure and stale-if-error activated, returns cached content // when available - cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) - cachedResp.StatusCode = http.StatusOK return cachedResp, nil } else { if err != nil || resp.StatusCode != http.StatusOK { diff --git a/vendor/github.com/gregjones/httpcache/httpcache_test.go b/vendor/github.com/gregjones/httpcache/httpcache_test.go deleted file mode 100644 index f22ed837fb..0000000000 --- a/vendor/github.com/gregjones/httpcache/httpcache_test.go +++ /dev/null @@ -1,1384 +0,0 @@ -package httpcache - -import ( - "bytes" - "errors" - "flag" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strconv" - "testing" - "time" -) - -var s struct { - server *httptest.Server - client http.Client - transport *Transport - done chan struct{} // Closed to unlock infinite handlers. 
-} - -type fakeClock struct { - elapsed time.Duration -} - -func (c *fakeClock) since(t time.Time) time.Duration { - return c.elapsed -} - -func TestMain(m *testing.M) { - flag.Parse() - setup() - code := m.Run() - teardown() - os.Exit(code) -} - -func setup() { - tp := NewMemoryCacheTransport() - client := http.Client{Transport: tp} - s.transport = tp - s.client = client - s.done = make(chan struct{}) - - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - - mux.HandleFunc("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - })) - - mux.HandleFunc("/method", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - w.Write([]byte(r.Method)) - })) - - mux.HandleFunc("/range", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - lm := "Fri, 14 Dec 2010 01:01:50 GMT" - if r.Header.Get("if-modified-since") == lm { - w.WriteHeader(http.StatusNotModified) - return - } - w.Header().Set("last-modified", lm) - if r.Header.Get("range") == "bytes=4-9" { - w.WriteHeader(http.StatusPartialContent) - w.Write([]byte(" text ")) - return - } - w.Write([]byte("Some text content")) - })) - - mux.HandleFunc("/nostore", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "no-store") - })) - - mux.HandleFunc("/etag", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - etag := "124567" - if r.Header.Get("if-none-match") == etag { - w.WriteHeader(http.StatusNotModified) - return - } - w.Header().Set("etag", etag) - })) - - mux.HandleFunc("/lastmodified", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - lm := "Fri, 14 Dec 2010 01:01:50 GMT" - if r.Header.Get("if-modified-since") == lm { - w.WriteHeader(http.StatusNotModified) - return - } - w.Header().Set("last-modified", lm) - })) - - mux.HandleFunc("/varyaccept", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - w.Header().Set("Content-Type", "text/plain") - w.Header().Set("Vary", "Accept") - w.Write([]byte("Some text content")) - })) - - mux.HandleFunc("/doublevary", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - w.Header().Set("Content-Type", "text/plain") - w.Header().Set("Vary", "Accept, Accept-Language") - w.Write([]byte("Some text content")) - })) - mux.HandleFunc("/2varyheaders", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - w.Header().Set("Content-Type", "text/plain") - w.Header().Add("Vary", "Accept") - w.Header().Add("Vary", "Accept-Language") - w.Write([]byte("Some text content")) - })) - mux.HandleFunc("/varyunused", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", "max-age=3600") - w.Header().Set("Content-Type", "text/plain") - w.Header().Set("Vary", "X-Madeup-Header") - w.Write([]byte("Some text content")) - })) - - updateFieldsCounter := 0 - mux.HandleFunc("/updatefields", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-Counter", strconv.Itoa(updateFieldsCounter)) - w.Header().Set("Etag", `"e"`) - updateFieldsCounter++ - if r.Header.Get("if-none-match") != "" { - w.WriteHeader(http.StatusNotModified) - return - } - w.Write([]byte("Some text content")) - })) - - // Take 3 seconds to return 200 OK (for testing client timeouts). 
- mux.HandleFunc("/3seconds", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(3 * time.Second) - })) - - mux.HandleFunc("/infinite", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for { - select { - case <-s.done: - return - default: - w.Write([]byte{0}) - } - } - })) -} - -func teardown() { - close(s.done) - s.server.Close() -} - -func resetTest() { - s.transport.Cache = NewMemoryCache() - clock = &realClock{} -} - -// TestCacheableMethod ensures that uncacheable method does not get stored -// in cache and get incorrectly used for a following cacheable method request. -func TestCacheableMethod(t *testing.T) { - resetTest() - { - req, err := http.NewRequest("POST", s.server.URL+"/method", nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), "POST"; got != want { - t.Errorf("got %q, want %q", got, want) - } - if resp.StatusCode != http.StatusOK { - t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) - } - } - { - req, err := http.NewRequest("GET", s.server.URL+"/method", nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), "GET"; got != want { - t.Errorf("got wrong body %q, want %q", got, want) - } - if resp.StatusCode != http.StatusOK { - t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) - } - if resp.Header.Get(XFromCache) != "" { - t.Errorf("XFromCache header isn't blank") - } - } -} - -func TestDontServeHeadResponseToGetRequest(t *testing.T) { - resetTest() - url := s.server.URL + "/" - req, err := http.NewRequest(http.MethodHead, url, nil) - if err != nil { - t.Fatal(err) - } - _, err = s.client.Do(req) - if err != nil { - t.Fatal(err) - } - req, err = http.NewRequest(http.MethodGet, url, nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - if resp.Header.Get(XFromCache) != "" { - t.Errorf("Cache should not match") - } -} - -func TestDontStorePartialRangeInCache(t *testing.T) { - resetTest() - { - req, err := http.NewRequest("GET", s.server.URL+"/range", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("range", "bytes=4-9") - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), " text "; got != want { - t.Errorf("got %q, want %q", got, want) - } - if resp.StatusCode != http.StatusPartialContent { - t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) - } - } - { - req, err := http.NewRequest("GET", s.server.URL+"/range", nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), "Some text content"; got != want { - t.Errorf("got %q, want %q", got, want) - } - if resp.StatusCode != 
http.StatusOK { - t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) - } - if resp.Header.Get(XFromCache) != "" { - t.Error("XFromCache header isn't blank") - } - } - { - req, err := http.NewRequest("GET", s.server.URL+"/range", nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), "Some text content"; got != want { - t.Errorf("got %q, want %q", got, want) - } - if resp.StatusCode != http.StatusOK { - t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) - } - if resp.Header.Get(XFromCache) != "1" { - t.Errorf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } - { - req, err := http.NewRequest("GET", s.server.URL+"/range", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("range", "bytes=4-9") - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - _, err = io.Copy(&buf, resp.Body) - if err != nil { - t.Fatal(err) - } - err = resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if got, want := buf.String(), " text "; got != want { - t.Errorf("got %q, want %q", got, want) - } - if resp.StatusCode != http.StatusPartialContent { - t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) - } - } -} - -func TestCacheOnlyIfBodyRead(t *testing.T) { - resetTest() - { - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - // We do not read the body - resp.Body.Close() - } - { - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatalf("XFromCache header isn't blank") - } - } -} - -func TestOnlyReadBodyOnDemand(t *testing.T) { - resetTest() - - req, err := http.NewRequest("GET", s.server.URL+"/infinite", nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) // This shouldn't hang forever. - if err != nil { - t.Fatal(err) - } - buf := make([]byte, 10) // Only partially read the body. 
- _, err = resp.Body.Read(buf) - if err != nil { - t.Fatal(err) - } - resp.Body.Close() -} - -func TestGetOnlyIfCachedHit(t *testing.T) { - resetTest() - { - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - req.Header.Add("cache-control", "only-if-cached") - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - if resp.StatusCode != http.StatusOK { - t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode) - } - } -} - -func TestGetOnlyIfCachedMiss(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - req.Header.Add("cache-control", "only-if-cached") - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - if resp.StatusCode != http.StatusGatewayTimeout { - t.Fatalf("response status code isn't 504 GatewayTimeout: %v", resp.StatusCode) - } -} - -func TestGetNoStoreRequest(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL, nil) - if err != nil { - t.Fatal(err) - } - req.Header.Add("Cache-Control", "no-store") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } -} - -func TestGetNoStoreResponse(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/nostore", nil) - if err != nil { - t.Fatal(err) - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } -} - -func TestGetWithEtag(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/etag", nil) - if err != nil { - t.Fatal(err) - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - // additional assertions to verify that 304 response is converted properly - if resp.StatusCode != http.StatusOK { - t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode) - } - if _, ok := resp.Header["Connection"]; ok { - 
t.Fatalf("Connection header isn't absent") - } - } -} - -func TestGetWithLastModified(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/lastmodified", nil) - if err != nil { - t.Fatal(err) - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } -} - -func TestGetWithVary(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/varyaccept", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Accept", "text/plain") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get("Vary") != "Accept" { - t.Fatalf(`Vary header isn't "Accept": %v`, resp.Header.Get("Vary")) - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } - req.Header.Set("Accept", "text/html") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - req.Header.Set("Accept", "") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } -} - -func TestGetWithDoubleVary(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/doublevary", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Accept", "text/plain") - req.Header.Set("Accept-Language", "da, en-gb;q=0.8, en;q=0.7") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get("Vary") == "" { - t.Fatalf(`Vary header is blank`) - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } - req.Header.Set("Accept-Language", "") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - req.Header.Set("Accept-Language", "da") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } -} - -func TestGetWith2VaryHeaders(t *testing.T) { - resetTest() - // Tests that multiple Vary headers' comma-separated lists are - // merged. See https://github.com/gregjones/httpcache/issues/27. 
- const ( - accept = "text/plain" - acceptLanguage = "da, en-gb;q=0.8, en;q=0.7" - ) - req, err := http.NewRequest("GET", s.server.URL+"/2varyheaders", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Accept", accept) - req.Header.Set("Accept-Language", acceptLanguage) - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get("Vary") == "" { - t.Fatalf(`Vary header is blank`) - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } - req.Header.Set("Accept-Language", "") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - req.Header.Set("Accept-Language", "da") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - req.Header.Set("Accept-Language", acceptLanguage) - req.Header.Set("Accept", "") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - } - req.Header.Set("Accept", "image/png") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "" { - t.Fatal("XFromCache header isn't blank") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } -} - -func TestGetVaryUnused(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/varyunused", nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Accept", "text/plain") - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get("Vary") == "" { - t.Fatalf(`Vary header is blank`) - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - } -} - -func TestUpdateFields(t *testing.T) { - resetTest() - req, err := http.NewRequest("GET", s.server.URL+"/updatefields", nil) - if err != nil { - t.Fatal(err) - } - var counter, counter2 string - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - counter = resp.Header.Get("x-counter") - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - } - { - resp, err := s.client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.Header.Get(XFromCache) != "1" { - t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) - } - counter2 = resp.Header.Get("x-counter") - } - if counter == counter2 { - t.Fatalf(`both "x-counter" values are equal: %v %v`, counter, counter2) - } -} - -func TestParseCacheControl(t *testing.T) { - 
resetTest() - h := http.Header{} - for range parseCacheControl(h) { - t.Fatal("cacheControl should be empty") - } - - h.Set("cache-control", "no-cache") - { - cc := parseCacheControl(h) - if _, ok := cc["foo"]; ok { - t.Error(`Value "foo" shouldn't exist`) - } - noCache, ok := cc["no-cache"] - if !ok { - t.Fatalf(`"no-cache" value isn't set`) - } - if noCache != "" { - t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) - } - } - h.Set("cache-control", "no-cache, max-age=3600") - { - cc := parseCacheControl(h) - noCache, ok := cc["no-cache"] - if !ok { - t.Fatalf(`"no-cache" value isn't set`) - } - if noCache != "" { - t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) - } - if cc["max-age"] != "3600" { - t.Fatalf(`"max-age" value isn't "3600": %v`, cc["max-age"]) - } - } -} - -func TestNoCacheRequestExpiration(t *testing.T) { - resetTest() - respHeaders := http.Header{} - respHeaders.Set("Cache-Control", "max-age=7200") - - reqHeaders := http.Header{} - reqHeaders.Set("Cache-Control", "no-cache") - if getFreshness(respHeaders, reqHeaders) != transparent { - t.Fatal("freshness isn't transparent") - } -} - -func TestNoCacheResponseExpiration(t *testing.T) { - resetTest() - respHeaders := http.Header{} - respHeaders.Set("Cache-Control", "no-cache") - respHeaders.Set("Expires", "Wed, 19 Apr 3000 11:43:00 GMT") - - reqHeaders := http.Header{} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestReqMustRevalidate(t *testing.T) { - resetTest() - // not paying attention to request setting max-stale means never returning stale - // responses, so always acting as if must-revalidate is set - respHeaders := http.Header{} - - reqHeaders := http.Header{} - reqHeaders.Set("Cache-Control", "must-revalidate") - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestRespMustRevalidate(t *testing.T) { - resetTest() - respHeaders := http.Header{} - respHeaders.Set("Cache-Control", "must-revalidate") - - reqHeaders := http.Header{} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestFreshExpiration(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) - - reqHeaders := http.Header{} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - clock = &fakeClock{elapsed: 3 * time.Second} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestMaxAge(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("cache-control", "max-age=2") - - reqHeaders := http.Header{} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - clock = &fakeClock{elapsed: 3 * time.Second} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestMaxAgeZero(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("cache-control", "max-age=0") - - reqHeaders := http.Header{} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestBothMaxAge(t *testing.T) { - resetTest() - now := time.Now() - 
respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("cache-control", "max-age=2") - - reqHeaders := http.Header{} - reqHeaders.Set("cache-control", "max-age=0") - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestMinFreshWithExpires(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) - - reqHeaders := http.Header{} - reqHeaders.Set("cache-control", "min-fresh=1") - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - reqHeaders = http.Header{} - reqHeaders.Set("cache-control", "min-fresh=2") - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func TestEmptyMaxStale(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("cache-control", "max-age=20") - - reqHeaders := http.Header{} - reqHeaders.Set("cache-control", "max-stale") - clock = &fakeClock{elapsed: 10 * time.Second} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - clock = &fakeClock{elapsed: 60 * time.Second} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } -} - -func TestMaxStaleValue(t *testing.T) { - resetTest() - now := time.Now() - respHeaders := http.Header{} - respHeaders.Set("date", now.Format(time.RFC1123)) - respHeaders.Set("cache-control", "max-age=10") - - reqHeaders := http.Header{} - reqHeaders.Set("cache-control", "max-stale=20") - clock = &fakeClock{elapsed: 5 * time.Second} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - clock = &fakeClock{elapsed: 15 * time.Second} - if getFreshness(respHeaders, reqHeaders) != fresh { - t.Fatal("freshness isn't fresh") - } - - clock = &fakeClock{elapsed: 30 * time.Second} - if getFreshness(respHeaders, reqHeaders) != stale { - t.Fatal("freshness isn't stale") - } -} - -func containsHeader(headers []string, header string) bool { - for _, v := range headers { - if http.CanonicalHeaderKey(v) == http.CanonicalHeaderKey(header) { - return true - } - } - return false -} - -func TestGetEndToEndHeaders(t *testing.T) { - resetTest() - var ( - headers http.Header - end2end []string - ) - - headers = http.Header{} - headers.Set("content-type", "text/html") - headers.Set("te", "deflate") - - end2end = getEndToEndHeaders(headers) - if !containsHeader(end2end, "content-type") { - t.Fatal(`doesn't contain "content-type" header`) - } - if containsHeader(end2end, "te") { - t.Fatal(`doesn't contain "te" header`) - } - - headers = http.Header{} - headers.Set("connection", "content-type") - headers.Set("content-type", "text/csv") - headers.Set("te", "deflate") - end2end = getEndToEndHeaders(headers) - if containsHeader(end2end, "connection") { - t.Fatal(`doesn't contain "connection" header`) - } - if containsHeader(end2end, "content-type") { - t.Fatal(`doesn't contain "content-type" header`) - } - if containsHeader(end2end, "te") { - t.Fatal(`doesn't contain "te" header`) - } - - headers = http.Header{} - end2end = getEndToEndHeaders(headers) - if len(end2end) != 0 { - t.Fatal(`non-zero end2end headers`) - } - - headers = http.Header{} - headers.Set("connection", "content-type") - end2end = 
getEndToEndHeaders(headers) - if len(end2end) != 0 { - t.Fatal(`non-zero end2end headers`) - } -} - -type transportMock struct { - response *http.Response - err error -} - -func (t transportMock) RoundTrip(req *http.Request) (resp *http.Response, err error) { - return t.response, t.err -} - -func TestStaleIfErrorRequest(t *testing.T) { - resetTest() - now := time.Now() - tmock := transportMock{ - response: &http.Response{ - Status: http.StatusText(http.StatusOK), - StatusCode: http.StatusOK, - Header: http.Header{ - "Date": []string{now.Format(time.RFC1123)}, - "Cache-Control": []string{"no-cache"}, - }, - Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), - }, - err: nil, - } - tp := NewMemoryCacheTransport() - tp.Transport = &tmock - - // First time, response is cached on success - r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) - r.Header.Set("Cache-Control", "stale-if-error") - resp, err := tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - // On failure, response is returned from the cache - tmock.response = nil - tmock.err = errors.New("some error") - resp, err = tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } -} - -func TestStaleIfErrorRequestLifetime(t *testing.T) { - resetTest() - now := time.Now() - tmock := transportMock{ - response: &http.Response{ - Status: http.StatusText(http.StatusOK), - StatusCode: http.StatusOK, - Header: http.Header{ - "Date": []string{now.Format(time.RFC1123)}, - "Cache-Control": []string{"no-cache"}, - }, - Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), - }, - err: nil, - } - tp := NewMemoryCacheTransport() - tp.Transport = &tmock - - // First time, response is cached on success - r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) - r.Header.Set("Cache-Control", "stale-if-error=100") - resp, err := tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - // On failure, response is returned from the cache - tmock.response = nil - tmock.err = errors.New("some error") - resp, err = tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - - // Same for http errors - tmock.response = &http.Response{StatusCode: http.StatusInternalServerError} - tmock.err = nil - resp, err = tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - - // If failure last more than max stale, error is returned - clock = &fakeClock{elapsed: 200 * time.Second} - _, err = tp.RoundTrip(r) - if err != tmock.err { - t.Fatalf("got err %v, want %v", err, tmock.err) - } -} - -func TestStaleIfErrorResponse(t *testing.T) { - resetTest() - now := time.Now() - tmock := transportMock{ - response: &http.Response{ - Status: http.StatusText(http.StatusOK), - StatusCode: http.StatusOK, - Header: http.Header{ - "Date": []string{now.Format(time.RFC1123)}, - "Cache-Control": []string{"no-cache, stale-if-error"}, - }, - Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), - }, - err: nil, - } - tp := NewMemoryCacheTransport() - tp.Transport = &tmock - - // First time, response is cached on success - r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) - resp, err := tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - 
t.Fatal("resp is nil") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - // On failure, response is returned from the cache - tmock.response = nil - tmock.err = errors.New("some error") - resp, err = tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } -} - -func TestStaleIfErrorResponseLifetime(t *testing.T) { - resetTest() - now := time.Now() - tmock := transportMock{ - response: &http.Response{ - Status: http.StatusText(http.StatusOK), - StatusCode: http.StatusOK, - Header: http.Header{ - "Date": []string{now.Format(time.RFC1123)}, - "Cache-Control": []string{"no-cache, stale-if-error=100"}, - }, - Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), - }, - err: nil, - } - tp := NewMemoryCacheTransport() - tp.Transport = &tmock - - // First time, response is cached on success - r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) - resp, err := tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - // On failure, response is returned from the cache - tmock.response = nil - tmock.err = errors.New("some error") - resp, err = tp.RoundTrip(r) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("resp is nil") - } - - // If failure last more than max stale, error is returned - clock = &fakeClock{elapsed: 200 * time.Second} - _, err = tp.RoundTrip(r) - if err != tmock.err { - t.Fatalf("got err %v, want %v", err, tmock.err) - } -} - -// Test that http.Client.Timeout is respected when cache transport is used. -// That is so as long as request cancellation is propagated correctly. -// In the past, that required CancelRequest to be implemented correctly, -// but modern http.Client uses Request.Cancel (or request context) instead, -// so we don't have to do anything. -func TestClientTimeout(t *testing.T) { - if testing.Short() { - t.Skip("skipping timeout test in short mode") // Because it takes at least 3 seconds to run. - } - resetTest() - client := &http.Client{ - Transport: NewMemoryCacheTransport(), - Timeout: time.Second, - } - started := time.Now() - resp, err := client.Get(s.server.URL + "/3seconds") - taken := time.Since(started) - if err == nil { - t.Error("got nil error, want timeout error") - } - if resp != nil { - t.Error("got non-nil resp, want nil resp") - } - if taken >= 2*time.Second { - t.Error("client.Do took 2+ seconds, want < 2 seconds") - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 0000000000..364516251b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 0000000000..76cafe6ec7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["stream_chunk.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go new file mode 100644 index 0000000000..8858f06904 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go @@ -0,0 +1,118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/stream_chunk.proto + +package internal + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} +} +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (dst *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(dst, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { + proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) +} + +var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, + 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, + 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, + 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, + 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, + 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, + 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, + 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, + 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, + 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, + 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, + 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, + 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, + 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto new file mode 100644 index 0000000000..55f42ce63e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 0000000000..c99f83e585 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,80 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//examples/proto/examplepb:go_default_library", + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + 
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 0000000000..896057e1e1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,210 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + for _, val := range vals { + key = textproto.CanonicalMIMEHeaderKey(key) + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. 
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		} else {
+			grpclog.Infof("invalid remote addr: %s", addr)
+		}
+	}
+
+	if timeout != 0 {
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 0000000000..a5b3bd6a79
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,312 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence may be encoded in standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in
+// base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Enum converts the given string into an int32 that should be type cast into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+	return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around []byte type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrappers.BytesValue{Value: parsedVal}, err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 0000000000..b6e5ddf7a9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go new file mode 100644 index 0000000000..41d54ef916 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -0,0 +1,145 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + return http.StatusPreconditionFailed + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with the error. + // You can set a custom function to this variable to customize error format. + HTTPError = DefaultHTTPError + // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest + OtherErrorHandler = DefaultOtherErrorHandler +) + +type errorBody struct { + Error string `protobuf:"bytes,1,name=error" json:"error"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + Message string `protobuf:"bytes,1,name=message" json:"message"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +// Make this also conform to proto.Message for builtin JSONPb Marshaler +func (e *errorBody) Reset() { *e = errorBody{} } +func (e *errorBody) String() string { return proto.CompactTextString(e) } +func (*errorBody) ProtoMessage() {} + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a JSON object, +// which contains a member whose key is "error" and whose value is err.Error(). 
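+//
+// For example (illustrative, exact output depends on the marshaler in use),
+// a gRPC NotFound error produces a body such as:
+//
+//	{"error": "not found", "message": "not found", "code": 5}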
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + const fallback = `{"error": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + pb := s.Proto() + contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + body := &errorBody{ + Error: s.Message(), + Message: s.Message(), + Code: int32(s.Code()), + Details: s.Proto().GetDetails(), + } + + buf, merr := marshaler.Marshal(body) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", body, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. +// It simply writes a string representation of the given error into "w". +func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { + http.Error(w, msg, code) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go new file mode 100644 index 0000000000..e1cf7a9146 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go @@ -0,0 +1,70 @@ +package runtime + +import ( + "encoding/json" + "io" + "strings" + + "github.com/golang/protobuf/protoc-gen-go/generator" + "google.golang.org/genproto/protobuf/field_mask" +) + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
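+//
+// For example (illustrative), a request body of
+//
+//	{"name": "x", "address": {"city": "y"}}
+//
+// yields a mask containing the paths "Name" and "Address.City" (in no
+// particular order), after CamelCase conversion of each JSON key.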
+func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node + path []string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} +} + +// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic +// that's used for naming protobuf fields in Go. +func CamelCaseFieldMask(mask *field_mask.FieldMask) { + if mask == nil || mask.Paths == nil { + return + } + + var newPaths []string + for _, path := range mask.Paths { + lowerCasedParts := strings.Split(path, ".") + var camelCasedParts []string + for _, part := range lowerCasedParts { + camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) + } + newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) + } + + mask.Paths = newPaths +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go new file mode 100644 index 0000000000..1fc63f7f58 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -0,0 +1,215 @@ +package runtime + +import ( + "fmt" + "io" + "net/http" + "net/textproto" + + "context" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
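+//
+// Each received message is wrapped as {"result": <message>} (or
+// {"error": <status>} on failure) before marshaling, followed by the
+// marshaler's delimiter, which is a newline unless the marshaler implements
+// the Delimited interface.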
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) + return + } + + buf, err := marshaler.Marshal(streamChunk(resp, nil)) + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
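+//
+// If the generated response type exposes an XXX_ResponseBody method (set via
+// the `response_body` option of google.api.HttpRule), only that field is
+// marshaled as the HTTP body; otherwise the whole message is marshaled.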
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) { + buf, merr := marshaler.Marshal(streamChunk(nil, err)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if !wroteHeader { + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + w.WriteHeader(HTTPStatusFromCode(s.Code())) + } + if _, werr := w.Write(buf); werr != nil { + grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +func streamChunk(result proto.Message, err error) map[string]proto.Message { + if err != nil { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return map[string]proto.Message{ + "error": &internal.StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + }, + } + } + if result == nil { + return streamChunk(nil, fmt.Errorf("empty response")) + } + return map[string]proto.Message{"result": result} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go new file mode 100644 index 0000000000..f55285b5d6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go @@ -0,0 +1,43 @@ +package runtime + +import ( + 
"google.golang.org/genproto/googleapis/api/httpbody" +) + +// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler +func SetHTTPBodyMarshaler(serveMux *ServeMux) { + serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{OrigName: true}, + } +} + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType implementation to keep backwards compatability with marshal interface +func (h *HTTPBodyMarshaler) ContentType() string { + return h.ContentTypeFromMessage(nil) +} + +// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns +// its specified content type otherwise fall back to the default Marshaler. +func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType() +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.Data, nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go new file mode 100644 index 0000000000..f9d3a585a4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go @@ -0,0 +1,45 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000000..3530dddd0a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,242 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb" package.
+// It supports the full functionality of protobuf, unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go new file mode 100644 index 0000000000..f65d1a2676 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". 
+func (*ProtoMarshaller) ContentType() string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into proto bytes.
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value".
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 0000000000..98fe6e88ac
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequences into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	ContentType() string
+}
+
+// Decoder decodes a byte sequence.
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+ Delimiter() []byte +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go new file mode 100644 index 0000000000..5cc53ae4f6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "errors" + "net/http" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. +func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. 
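+//
+// Usage sketch (illustrative):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithMarshalerOption("application/x-protobuf", &runtime.ProtoMarshaller{}),
+//	)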
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go new file mode 100644 index 0000000000..ec81e55b5e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -0,0 +1,268 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. 
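+//
+// For example (illustrative), to forward one extra header and otherwise keep
+// the default behavior:
+//
+//	runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
+//		if key == "X-Request-Id" {
+//			return key, true
+//		}
+//		return runtime.DefaultHeaderMatcher(key)
+//	})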
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
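+//
+// A ":verb" suffix on the final path segment (e.g. /v1/jobs/123:cancel) is
+// split off and matched against the pattern's verb, as defined by
+// google.api.http custom methods.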
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 0000000000..f16a84ad38 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,227 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string +} + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
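+//
+// The (code, operand) pairs in "ops" are normally emitted by
+// protoc-gen-grpc-gateway; they are not intended to be written by hand.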
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. 
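+//
+// For example (illustrative), a pattern compiled from /v1/users/{name}
+// matches components ["v1", "users", "jane"] with verb "" and returns
+// map[string]string{"name": "jane"}.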
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + return nil, ErrNotMatch + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 0000000000..a3151e2a55 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 0000000000..b7fa32e45d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,70 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via the status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of ProtoErrorHandlerFunc.
+// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to the HTTPError variable directly, use the WithProtoErrorHandler option instead.
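+//
+// A typical wiring, sketched:
+//
+//	mux := runtime.NewServeMux(runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler))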
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	// return codes.Internal when marshaling the error message fails
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backward compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	buf, merr := marshaler.Marshal(s.Proto())
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 0000000000..bb9359f17c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,392 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+// valuesKeyRegexp matches map-style query keys of the form "field[key]".
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		match := valuesKeyRegexp.FindStringSubmatch(key)
+		if len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+// It instantiates missing protobuf fields as it goes.
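+//
+// For example, given a hypothetical message with a nested "author" message
+// that has a "name" field:
+//
+//	msg := new(examplepb.Book)
+//	err := PopulateFieldFromPath(msg, "author.name", "Jane") // sets msg.Author.Name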
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg, fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
+	m := reflect.ValueOf(msg)
+	if m.Kind() != reflect.Ptr {
+		return fmt.Errorf("unexpected type %T: %v", msg, msg)
+	}
+	var props *proto.Properties
+	m = m.Elem()
+	for i, fieldName := range fieldPath {
+		isLast := i == len(fieldPath)-1
+		if !isLast && m.Kind() != reflect.Struct {
+			return fmt.Errorf("non-aggregate type in the middle of the path: %s", strings.Join(fieldPath, "."))
+		}
+		var f reflect.Value
+		var err error
+		f, props, err = fieldByProtoName(m, fieldName)
+		if err != nil {
+			return err
+		} else if !f.IsValid() {
+			grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
+			return nil
+		}
+
+		switch f.Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			m = f
+		case reflect.Slice:
+			if !isLast {
+				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
+			}
+			// Handle []byte
+			if f.Type().Elem().Kind() == reflect.Uint8 {
+				m = f
+				break
+			}
+			return populateRepeatedField(f, values, props)
+		case reflect.Ptr:
+			if f.IsNil() {
+				m = reflect.New(f.Type().Elem())
+				f.Set(m.Convert(f.Type()))
+			}
+			m = f.Elem()
+			continue
+		case reflect.Struct:
+			m = f
+			continue
+		case reflect.Map:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			return populateMapField(f, values, props)
+		default:
+			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
+		}
+	}
+	switch len(values) {
+	case 0:
+		return fmt.Errorf("no value provided for field: %s", strings.Join(fieldPath, "."))
+	case 1:
+	default:
+		grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
+	}
+	return populateField(m, values[0], props)
+}
+
+// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
+// "m" must be a struct value. It returns a zero reflect.Value if no such field is found.
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+	props := proto.GetProperties(m.Type())
+
+	// look up the field name in the oneof map
+	if op, ok := props.OneofTypes[name]; ok {
+		v := reflect.New(op.Type.Elem())
+		field := m.Field(op.Field)
+		if !field.IsNil() {
+			return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+		}
+		field.Set(v)
+		return v.Elem().Field(0), op.Prop, nil
+	}
+
+	for _, p := range props.Prop {
+		if p.OrigName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+		if p.JSONName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+	}
+	return reflect.Value{}, nil, nil
+}
+
+// populateMapField parses a (key, value) pair from "values" and stores it in the map field f.
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("expected a key and a value for map %s, but got %d values", props.Name, len(values))
+	}
+
+	key, value := values[0], values[1]
+	keyType := f.Type().Key()
+	valueType := f.Type().Elem()
+	if f.IsNil() {
+		f.Set(reflect.MakeMap(f.Type()))
+	}
+
+	keyConv, ok := convFromType[keyType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+	}
+	valueConv, ok := convFromType[valueType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+	}
+
+	keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+	if err := keyV[1].Interface(); err != nil {
+		return err.(error)
+	}
+	valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := valueV[1].Interface(); err != nil {
+		return err.(error)
+	}
+
+	f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+	return nil
+}
+
+// populateRepeatedField parses each element of "values" and stores the results in the slice field f.
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+	elemType := f.Type().Elem()
+
+	// is the destination field a slice of an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnumRepeated(f, values, enumValMap)
+	}
+
+	conv, ok := convFromType[elemType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported field type %s", elemType)
+	}
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+		if err := result[1].Interface(); err != nil {
+			return err.(error)
+		}
+		f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+	}
+	return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+	i := f.Addr().Interface()
+
+	// Handle protobuf well known types
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	if wkt, ok := i.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "Timestamp":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, value)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+			f.Field(0).SetInt(int64(t.Unix()))
+			f.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Duration":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+			d, err := time.ParseDuration(value)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			f.Field(0).SetInt(s)
+			f.Field(1).SetInt(ns)
+			return nil
+		case "DoubleValue":
+			fallthrough
+		case "FloatValue":
+			float64Val, err := strconv.ParseFloat(value, 64)
+			if err != nil {
+				return fmt.Errorf("bad %s: %s", wkt.XXX_WellKnownType(), value)
+			}
+			f.Field(0).SetFloat(float64Val)
+			return nil
+		case "Int64Value":
+			fallthrough
+		case "Int32Value":
+			int64Val, err := strconv.ParseInt(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad %s: %s", wkt.XXX_WellKnownType(), value)
+			}
+			f.Field(0).SetInt(int64Val)
+			return nil
+		case "UInt64Value":
+			fallthrough
+		case "UInt32Value":
+			uint64Val, err := strconv.ParseUint(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad %s: %s", wkt.XXX_WellKnownType(), value)
+			}
+			f.Field(0).SetUint(uint64Val)
+			return nil
+		case "BoolValue":
+			if value == "true" {
+				f.Field(0).SetBool(true)
+			} else if value == "false" {
+				f.Field(0).SetBool(false)
+			} else {
+				return fmt.Errorf("bad BoolValue: %s", value)
+			}
+			return nil
+		case "StringValue":
+			f.Field(0).SetString(value)
+			return nil
+		case "BytesValue":
+			bytesVal, err := base64.StdEncoding.DecodeString(value)
+			if err != nil {
+				return fmt.Errorf("bad BytesValue: %s", value)
+			}
+			f.Field(0).SetBytes(bytesVal)
+			return nil
+		}
+	}
+
+	// Handle google well known types
+	if gwkt, ok := i.(proto.Message); ok {
+		switch proto.MessageName(gwkt) {
+		case "google.protobuf.FieldMask":
+			p := f.Field(0)
+			for _, v := range strings.Split(value, ",") {
+				if v != "" {
+					p.Set(reflect.Append(p, reflect.ValueOf(v)))
+				}
+			}
+			return nil
+		}
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 0000000000..7109d79323 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 0000000000..cf79a4d588 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 0000000000..dfe7de4864
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to the stack
+	OpPush
+	// OpLitPush pushes a component to the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes the result to the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 0000000000..6dd3854665
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 0000000000..c2b7b30dd9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of the Double Array
+	Base []int
+	// Check is the check array of the Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+// children returns one node per distinct value found at column col+1 within
+// this node's row range; it relies on seqs being sorted, so rows with equal
+// values are adjacent.
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := -1
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		lastVal = seqs[i][n.col+1]
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	if k < len(sj) {
+		return true
+	}
+	return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
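+//
+// For example:
+//
+//	da := NewDoubleArray([][]string{{"a", "b"}, {"x"}})
+//	da.HasCommonPrefix([]string{"a", "b", "c"}) // true: {"a", "b"} is a prefix
+//	da.HasCommonPrefix([]string{"a"})           // false: no registered sequence is a prefix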
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go index 337d963296..e474cd0758 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -30,9 +30,9 @@ type TwoQueueCache struct { size int recentSize int - recent *simplelru.LRU - frequent *simplelru.LRU - recentEvict *simplelru.LRU + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache lock sync.RWMutex } @@ -84,7 +84,8 @@ func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCa return c, nil } -func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -105,6 +106,7 @@ func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { return nil, false } +// Add adds a value to the cache. func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -160,12 +162,15 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { c.frequent.RemoveOldest() } +// Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() @@ -174,6 +179,7 @@ func (c *TwoQueueCache) Keys() []interface{} { return append(k1, k2...) } +// Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -188,6 +194,7 @@ func (c *TwoQueueCache) Remove(key interface{}) { } } +// Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() @@ -196,13 +203,17 @@ func (c *TwoQueueCache) Purge() { c.recentEvict.Purge() } +// Contains is used to check if the cache contains a key +// without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } -func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
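+//
+// For example, in a sketch where c is a *TwoQueueCache:
+//
+//	if v, ok := c.Peek(k); ok {
+//		_ = v // read without promoting k to the frequent list
+//	}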
+func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/2q_test.go b/vendor/github.com/hashicorp/golang-lru/2q_test.go deleted file mode 100644 index 1b0f351817..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/2q_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package lru - -import ( - "math/rand" - "testing" -) - -func Benchmark2Q_Rand(b *testing.B) { - l, err := New2Q(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - trace[i] = rand.Int63() % 32768 - } - - b.ResetTimer() - - var hit, miss int - for i := 0; i < 2*b.N; i++ { - if i%2 == 0 { - l.Add(trace[i], trace[i]) - } else { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func Benchmark2Q_Freq(b *testing.B) { - l, err := New2Q(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - if i%2 == 0 { - trace[i] = rand.Int63() % 16384 - } else { - trace[i] = rand.Int63() % 32768 - } - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - l.Add(trace[i], trace[i]) - } - var hit, miss int - for i := 0; i < b.N; i++ { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func Test2Q_RandomOps(t *testing.T) { - size := 128 - l, err := New2Q(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - n := 200000 - for i := 0; i < n; i++ { - key := rand.Int63() % 512 - r := rand.Int63() - switch r % 3 { - case 0: - l.Add(key, key) - case 1: - l.Get(key) - case 2: - l.Remove(key) - } - - if l.recent.Len()+l.frequent.Len() > size { - t.Fatalf("bad: recent: %d freq: %d", - l.recent.Len(), l.frequent.Len()) - } - } -} - -func Test2Q_Get_RecentToFrequent(t *testing.T) { - l, err := New2Q(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Touch all the entries, should be in t1 - for i := 0; i < 128; i++ { - l.Add(i, i) - } - if n := l.recent.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - - // Get should upgrade to t2 - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("missing: %d", i) - } - } - if n := l.recent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } - - // Get be from t2 - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("missing: %d", i) - } - } - if n := l.recent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } -} - -func Test2Q_Add_RecentToFrequent(t *testing.T) { - l, err := New2Q(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Add initially to recent - l.Add(1, 1) - if n := l.recent.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - - // Add should upgrade to frequent - l.Add(1, 1) - if n := l.recent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - - // Add should remain in frequent - l.Add(1, 1) - if n := l.recent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } -} - -func 
Test2Q_Add_RecentEvict(t *testing.T) { - l, err := New2Q(4) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Add 1,2,3,4,5 -> Evict 1 - l.Add(1, 1) - l.Add(2, 2) - l.Add(3, 3) - l.Add(4, 4) - l.Add(5, 5) - if n := l.recent.Len(); n != 4 { - t.Fatalf("bad: %d", n) - } - if n := l.recentEvict.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - - // Pull in the recently evicted - l.Add(1, 1) - if n := l.recent.Len(); n != 3 { - t.Fatalf("bad: %d", n) - } - if n := l.recentEvict.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - - // Add 6, should cause another recent evict - l.Add(6, 6) - if n := l.recent.Len(); n != 3 { - t.Fatalf("bad: %d", n) - } - if n := l.recentEvict.Len(); n != 2 { - t.Fatalf("bad: %d", n) - } - if n := l.frequent.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } -} - -func Test2Q(t *testing.T) { - l, err := New2Q(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - for i := 0; i < 256; i++ { - l.Add(i, i) - } - if l.Len() != 128 { - t.Fatalf("bad len: %v", l.Len()) - } - - for i, k := range l.Keys() { - if v, ok := l.Get(k); !ok || v != k || v != i+128 { - t.Fatalf("bad key: %v", k) - } - } - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if ok { - t.Fatalf("should be evicted") - } - } - for i := 128; i < 256; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("should not be evicted") - } - } - for i := 128; i < 192; i++ { - l.Remove(i) - _, ok := l.Get(i) - if ok { - t.Fatalf("should be deleted") - } - } - - l.Purge() - if l.Len() != 0 { - t.Fatalf("bad len: %v", l.Len()) - } - if _, ok := l.Get(200); ok { - t.Fatalf("should contain nothing") - } -} - -// Test that Contains doesn't update recent-ness -func Test2Q_Contains(t *testing.T) { - l, err := New2Q(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if !l.Contains(1) { - t.Errorf("1 should be contained") - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("Contains should not have updated recent-ness of 1") - } -} - -// Test that Peek doesn't update recent-ness -func Test2Q_Peek(t *testing.T) { - l, err := New2Q(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if v, ok := l.Peek(1); !ok || v != 1 { - t.Errorf("1 should be set to 1: %v, %v", v, ok) - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("should not have updated recent-ness of 1") - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go index a2a2528173..555225a218 100644 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -18,11 +18,11 @@ type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 - t1 *simplelru.LRU // T1 is the LRU for recently accessed items - b1 *simplelru.LRU // B1 is the LRU for evictions from t1 + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - t2 *simplelru.LRU // T2 is the LRU for frequently accessed items - b2 *simplelru.LRU // B2 is the LRU for evictions from t2 + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } @@ -60,11 +60,11 @@ func NewARC(size int) (*ARCCache, error) { } // Get looks up a key's value from the cache. 
-func (c *ARCCache) Get(key interface{}) (interface{}, bool) { +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() - // Ff the value is contained in T1 (recent), then + // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) @@ -153,7 +153,7 @@ func (c *ARCCache) Add(key, value interface{}) { // Remove from B2 c.b2.Remove(key) - // Add the key to the frequntly used list + // Add the key to the frequently used list c.t2.Add(key, value) return } @@ -247,7 +247,7 @@ func (c *ARCCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. -func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/arc_test.go b/vendor/github.com/hashicorp/golang-lru/arc_test.go deleted file mode 100644 index e2d9b68c6a..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/arc_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package lru - -import ( - "math/rand" - "testing" - "time" -) - -func init() { - rand.Seed(time.Now().Unix()) -} - -func BenchmarkARC_Rand(b *testing.B) { - l, err := NewARC(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - trace[i] = rand.Int63() % 32768 - } - - b.ResetTimer() - - var hit, miss int - for i := 0; i < 2*b.N; i++ { - if i%2 == 0 { - l.Add(trace[i], trace[i]) - } else { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func BenchmarkARC_Freq(b *testing.B) { - l, err := NewARC(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - if i%2 == 0 { - trace[i] = rand.Int63() % 16384 - } else { - trace[i] = rand.Int63() % 32768 - } - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - l.Add(trace[i], trace[i]) - } - var hit, miss int - for i := 0; i < b.N; i++ { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func TestARC_RandomOps(t *testing.T) { - size := 128 - l, err := NewARC(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - n := 200000 - for i := 0; i < n; i++ { - key := rand.Int63() % 512 - r := rand.Int63() - switch r % 3 { - case 0: - l.Add(key, key) - case 1: - l.Get(key) - case 2: - l.Remove(key) - } - - if l.t1.Len()+l.t2.Len() > size { - t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d", - l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p) - } - if l.b1.Len()+l.b2.Len() > size { - t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d", - l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p) - } - } -} - -func TestARC_Get_RecentToFrequent(t *testing.T) { - l, err := NewARC(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Touch all the entries, should be in t1 - for i := 0; i < 128; i++ { - l.Add(i, i) - } - if n := l.t1.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - - // Get should upgrade to t2 - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("missing: %d", i) - } - } - if n := l.t1.Len(); n != 0 { - 
t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } - - // Get be from t2 - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("missing: %d", i) - } - } - if n := l.t1.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 128 { - t.Fatalf("bad: %d", n) - } -} - -func TestARC_Add_RecentToFrequent(t *testing.T) { - l, err := NewARC(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Add initially to t1 - l.Add(1, 1) - if n := l.t1.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - - // Add should upgrade to t2 - l.Add(1, 1) - if n := l.t1.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - - // Add should remain in t2 - l.Add(1, 1) - if n := l.t1.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } -} - -func TestARC_Adaptive(t *testing.T) { - l, err := NewARC(4) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Fill t1 - for i := 0; i < 4; i++ { - l.Add(i, i) - } - if n := l.t1.Len(); n != 4 { - t.Fatalf("bad: %d", n) - } - - // Move to t2 - l.Get(0) - l.Get(1) - if n := l.t2.Len(); n != 2 { - t.Fatalf("bad: %d", n) - } - - // Evict from t1 - l.Add(4, 4) - if n := l.b1.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - - // Current state - // t1 : (MRU) [4, 3] (LRU) - // t2 : (MRU) [1, 0] (LRU) - // b1 : (MRU) [2] (LRU) - // b2 : (MRU) [] (LRU) - - // Add 2, should cause hit on b1 - l.Add(2, 2) - if n := l.b1.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if l.p != 1 { - t.Fatalf("bad: %d", l.p) - } - if n := l.t2.Len(); n != 3 { - t.Fatalf("bad: %d", n) - } - - // Current state - // t1 : (MRU) [4] (LRU) - // t2 : (MRU) [2, 1, 0] (LRU) - // b1 : (MRU) [3] (LRU) - // b2 : (MRU) [] (LRU) - - // Add 4, should migrate to t2 - l.Add(4, 4) - if n := l.t1.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 4 { - t.Fatalf("bad: %d", n) - } - - // Current state - // t1 : (MRU) [] (LRU) - // t2 : (MRU) [4, 2, 1, 0] (LRU) - // b1 : (MRU) [3] (LRU) - // b2 : (MRU) [] (LRU) - - // Add 4, should evict to b2 - l.Add(5, 5) - if n := l.t1.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 3 { - t.Fatalf("bad: %d", n) - } - if n := l.b2.Len(); n != 1 { - t.Fatalf("bad: %d", n) - } - - // Current state - // t1 : (MRU) [5] (LRU) - // t2 : (MRU) [4, 2, 1] (LRU) - // b1 : (MRU) [3] (LRU) - // b2 : (MRU) [0] (LRU) - - // Add 0, should decrease p - l.Add(0, 0) - if n := l.t1.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if n := l.t2.Len(); n != 4 { - t.Fatalf("bad: %d", n) - } - if n := l.b1.Len(); n != 2 { - t.Fatalf("bad: %d", n) - } - if n := l.b2.Len(); n != 0 { - t.Fatalf("bad: %d", n) - } - if l.p != 0 { - t.Fatalf("bad: %d", l.p) - } - - // Current state - // t1 : (MRU) [] (LRU) - // t2 : (MRU) [0, 4, 2, 1] (LRU) - // b1 : (MRU) [5, 3] (LRU) - // b2 : (MRU) [0] (LRU) -} - -func TestARC(t *testing.T) { - l, err := NewARC(128) - if err != nil { - t.Fatalf("err: %v", err) - } - - for i := 0; i < 256; i++ { - l.Add(i, i) - } - if l.Len() != 128 { - t.Fatalf("bad len: %v", l.Len()) - } - - for i, k := range l.Keys() { - if v, ok := l.Get(k); !ok || v != k || v != i+128 { - t.Fatalf("bad key: %v", k) - } - } - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if ok { - t.Fatalf("should be evicted") - } - } - for i := 128; i < 256; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("should not be evicted") - } - } - for i := 128; i < 
192; i++ { - l.Remove(i) - _, ok := l.Get(i) - if ok { - t.Fatalf("should be deleted") - } - } - - l.Purge() - if l.Len() != 0 { - t.Fatalf("bad len: %v", l.Len()) - } - if _, ok := l.Get(200); ok { - t.Fatalf("should contain nothing") - } -} - -// Test that Contains doesn't update recent-ness -func TestARC_Contains(t *testing.T) { - l, err := NewARC(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if !l.Contains(1) { - t.Errorf("1 should be contained") - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("Contains should not have updated recent-ness of 1") - } -} - -// Test that Peek doesn't update recent-ness -func TestARC_Peek(t *testing.T) { - l, err := NewARC(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if v, ok := l.Peek(1); !ok || v != 1 { - t.Errorf("1 should be set to 1: %v, %v", v, ok) - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("should not have updated recent-ness of 1") - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 0000000000..2547df979d --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 0000000000..824cb97e83 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/golang-lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index a6285f989e..c8d9b0a230 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -1,6 +1,3 @@ -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru package lru import ( @@ -11,11 +8,11 @@ import ( // Cache is a thread-safe fixed size LRU cache. type Cache struct { - lru *simplelru.LRU + lru simplelru.LRUCache lock sync.RWMutex } -// New creates an LRU of the given size +// New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } @@ -33,7 +30,7 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. 
func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() @@ -41,30 +38,30 @@ func (c *Cache) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) bool { +func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() defer c.lock.Unlock() return c.lru.Add(key, value) } // Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (interface{}, bool) { +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() return c.lru.Get(key) } -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.lru.Contains(key) } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (interface{}, bool) { +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() return c.lru.Peek(key) @@ -73,16 +70,15 @@ func (c *Cache) Peek(key interface{}) (interface{}, bool) { // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false - } else { - evict := c.lru.Add(key, value) - return false, evict } + evicted = c.lru.Add(key, value) + return false, evicted } // Remove removes the provided key from the cache. 
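The exported Cache above is simplelru behind a sync.RWMutex, so the whole surface is safe for concurrent use. A minimal usage sketch (size, keys, and values are illustrative):

	package main

	import (
		"fmt"

		"github.com/hashicorp/golang-lru"
	)

	func main() {
		c, _ := lru.New(2)
		c.Add("a", 1)
		c.Add("b", 2)
		c.Add("c", 3)       // evicts "a", the least recently used entry
		_, ok := c.Get("a") // ok == false
		v, _ := c.Peek("b") // reads without refreshing recency
		fmt.Println(ok, v)  // false 2
	}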
diff --git a/vendor/github.com/hashicorp/golang-lru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/lru_test.go deleted file mode 100644 index 2b31218b07..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/lru_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package lru - -import ( - "math/rand" - "testing" -) - -func BenchmarkLRU_Rand(b *testing.B) { - l, err := New(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - trace[i] = rand.Int63() % 32768 - } - - b.ResetTimer() - - var hit, miss int - for i := 0; i < 2*b.N; i++ { - if i%2 == 0 { - l.Add(trace[i], trace[i]) - } else { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func BenchmarkLRU_Freq(b *testing.B) { - l, err := New(8192) - if err != nil { - b.Fatalf("err: %v", err) - } - - trace := make([]int64, b.N*2) - for i := 0; i < b.N*2; i++ { - if i%2 == 0 { - trace[i] = rand.Int63() % 16384 - } else { - trace[i] = rand.Int63() % 32768 - } - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - l.Add(trace[i], trace[i]) - } - var hit, miss int - for i := 0; i < b.N; i++ { - _, ok := l.Get(trace[i]) - if ok { - hit++ - } else { - miss++ - } - } - b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) -} - -func TestLRU(t *testing.T) { - evictCounter := 0 - onEvicted := func(k interface{}, v interface{}) { - if k != v { - t.Fatalf("Evict values not equal (%v!=%v)", k, v) - } - evictCounter += 1 - } - l, err := NewWithEvict(128, onEvicted) - if err != nil { - t.Fatalf("err: %v", err) - } - - for i := 0; i < 256; i++ { - l.Add(i, i) - } - if l.Len() != 128 { - t.Fatalf("bad len: %v", l.Len()) - } - - if evictCounter != 128 { - t.Fatalf("bad evict count: %v", evictCounter) - } - - for i, k := range l.Keys() { - if v, ok := l.Get(k); !ok || v != k || v != i+128 { - t.Fatalf("bad key: %v", k) - } - } - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if ok { - t.Fatalf("should be evicted") - } - } - for i := 128; i < 256; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("should not be evicted") - } - } - for i := 128; i < 192; i++ { - l.Remove(i) - _, ok := l.Get(i) - if ok { - t.Fatalf("should be deleted") - } - } - - l.Get(192) // expect 192 to be last key in l.Keys() - - for i, k := range l.Keys() { - if (i < 63 && k != i+193) || (i == 63 && k != 192) { - t.Fatalf("out of order key: %v", k) - } - } - - l.Purge() - if l.Len() != 0 { - t.Fatalf("bad len: %v", l.Len()) - } - if _, ok := l.Get(200); ok { - t.Fatalf("should contain nothing") - } -} - -// test that Add returns true/false if an eviction occurred -func TestLRUAdd(t *testing.T) { - evictCounter := 0 - onEvicted := func(k interface{}, v interface{}) { - evictCounter += 1 - } - - l, err := NewWithEvict(1, onEvicted) - if err != nil { - t.Fatalf("err: %v", err) - } - - if l.Add(1, 1) == true || evictCounter != 0 { - t.Errorf("should not have an eviction") - } - if l.Add(2, 2) == false || evictCounter != 1 { - t.Errorf("should have an eviction") - } -} - -// test that Contains doesn't update recent-ness -func TestLRUContains(t *testing.T) { - l, err := New(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if !l.Contains(1) { - t.Errorf("1 should be contained") - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("Contains should not have updated recent-ness of 1") - } -} - -// test that Contains doesn't update recent-ness -func 
TestLRUContainsOrAdd(t *testing.T) { - l, err := New(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - contains, evict := l.ContainsOrAdd(1, 1) - if !contains { - t.Errorf("1 should be contained") - } - if evict { - t.Errorf("nothing should be evicted here") - } - - l.Add(3, 3) - contains, evict = l.ContainsOrAdd(1, 1) - if contains { - t.Errorf("1 should not have been contained") - } - if !evict { - t.Errorf("an eviction should have occurred") - } - if !l.Contains(1) { - t.Errorf("now 1 should be contained") - } -} - -// test that Peek doesn't update recent-ness -func TestLRUPeek(t *testing.T) { - l, err := New(2) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if v, ok := l.Peek(1); !ok || v != 1 { - t.Errorf("1 should be set to 1: %v, %v", v, ok) - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("should not have updated recent-ness of 1") - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index cb416b394f..5673773b22 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -36,7 +36,7 @@ func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { @@ -48,7 +48,7 @@ func (c *LRU) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) bool { +func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) @@ -78,17 +78,18 @@ func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { return } -// Check if a key is in the cache, without updating the recent-ness +// Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { + var ent *list.Element + if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok @@ -96,7 +97,7 @@ func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { // Remove removes the provided key from the cache, returning if the // key was contained. -func (c *LRU) Remove(key interface{}) bool { +func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true @@ -105,7 +106,7 @@ func (c *LRU) Remove(key interface{}) bool { } // RemoveOldest removes the oldest item from the cache. 
-func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
 	ent := c.evictList.Back()
 	if ent != nil {
 		c.removeElement(ent)
@@ -116,7 +117,7 @@
 }
 
 // GetOldest returns the oldest entry
-func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
 	ent := c.evictList.Back()
 	if ent != nil {
 		kv := ent.Value.(*entry)
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 0000000000..74c7077440
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,36 @@
+package simplelru
+
+// LRUCache is the interface for a simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key, value interface{}) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Checks if a key exists in the cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from the cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (interface{}, interface{}, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []interface{}
+
+	// Returns the number of items in the cache.
+ Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go deleted file mode 100644 index a958934f64..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package simplelru - -import "testing" - -func TestLRU(t *testing.T) { - evictCounter := 0 - onEvicted := func(k interface{}, v interface{}) { - if k != v { - t.Fatalf("Evict values not equal (%v!=%v)", k, v) - } - evictCounter += 1 - } - l, err := NewLRU(128, onEvicted) - if err != nil { - t.Fatalf("err: %v", err) - } - - for i := 0; i < 256; i++ { - l.Add(i, i) - } - if l.Len() != 128 { - t.Fatalf("bad len: %v", l.Len()) - } - - if evictCounter != 128 { - t.Fatalf("bad evict count: %v", evictCounter) - } - - for i, k := range l.Keys() { - if v, ok := l.Get(k); !ok || v != k || v != i+128 { - t.Fatalf("bad key: %v", k) - } - } - for i := 0; i < 128; i++ { - _, ok := l.Get(i) - if ok { - t.Fatalf("should be evicted") - } - } - for i := 128; i < 256; i++ { - _, ok := l.Get(i) - if !ok { - t.Fatalf("should not be evicted") - } - } - for i := 128; i < 192; i++ { - ok := l.Remove(i) - if !ok { - t.Fatalf("should be contained") - } - ok = l.Remove(i) - if ok { - t.Fatalf("should not be contained") - } - _, ok = l.Get(i) - if ok { - t.Fatalf("should be deleted") - } - } - - l.Get(192) // expect 192 to be last key in l.Keys() - - for i, k := range l.Keys() { - if (i < 63 && k != i+193) || (i == 63 && k != 192) { - t.Fatalf("out of order key: %v", k) - } - } - - l.Purge() - if l.Len() != 0 { - t.Fatalf("bad len: %v", l.Len()) - } - if _, ok := l.Get(200); ok { - t.Fatalf("should contain nothing") - } -} - -func TestLRU_GetOldest_RemoveOldest(t *testing.T) { - l, err := NewLRU(128, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - for i := 0; i < 256; i++ { - l.Add(i, i) - } - k, _, ok := l.GetOldest() - if !ok { - t.Fatalf("missing") - } - if k.(int) != 128 { - t.Fatalf("bad: %v", k) - } - - k, _, ok = l.RemoveOldest() - if !ok { - t.Fatalf("missing") - } - if k.(int) != 128 { - t.Fatalf("bad: %v", k) - } - - k, _, ok = l.RemoveOldest() - if !ok { - t.Fatalf("missing") - } - if k.(int) != 129 { - t.Fatalf("bad: %v", k) - } -} - -// Test that Add returns true/false if an eviction occurred -func TestLRU_Add(t *testing.T) { - evictCounter := 0 - onEvicted := func(k interface{}, v interface{}) { - evictCounter += 1 - } - - l, err := NewLRU(1, onEvicted) - if err != nil { - t.Fatalf("err: %v", err) - } - - if l.Add(1, 1) == true || evictCounter != 0 { - t.Errorf("should not have an eviction") - } - if l.Add(2, 2) == false || evictCounter != 1 { - t.Errorf("should have an eviction") - } -} - -// Test that Contains doesn't update recent-ness -func TestLRU_Contains(t *testing.T) { - l, err := NewLRU(2, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if !l.Contains(1) { - t.Errorf("1 should be contained") - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("Contains should not have updated recent-ness of 1") - } -} - -// Test that Peek doesn't update recent-ness -func TestLRU_Peek(t *testing.T) { - l, err := NewLRU(2, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - l.Add(1, 1) - l.Add(2, 2) - if v, ok := l.Peek(1); !ok || v != 1 { - t.Errorf("1 should be set to 1: %v, %v", v, ok) - } - - l.Add(3, 3) - if l.Contains(1) { - t.Errorf("should not have updated recent-ness of 1") - } -} diff 
--git a/vendor/github.com/howeyc/gopass/.travis.yml b/vendor/github.com/howeyc/gopass/.travis.yml deleted file mode 100644 index cc5d509fdf..0000000000 --- a/vendor/github.com/howeyc/gopass/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -os: - - linux - - osx - -go: - - 1.3 - - 1.4 - - 1.5 - - tip diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt deleted file mode 100644 index 14f74708a4..0000000000 --- a/vendor/github.com/howeyc/gopass/LICENSE.txt +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012 Chris Howey - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE deleted file mode 100644 index da23621dc8..0000000000 --- a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE +++ /dev/null @@ -1,384 +0,0 @@ -Unless otherwise noted, all files in this distribution are released -under the Common Development and Distribution License (CDDL). -Exceptions are noted within the associated source files. - --------------------------------------------------------------------- - - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates - or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), - and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing - Original Software with files containing Modifications, in - each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other - than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this - License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed - herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original - Software or previous Modifications; - - B. Any new file that contains any part of the Original - Software or previous Modifications; or - - C. 
Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable - form of computer software code that is originally released - under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, - process, and apparatus claims, in any patent Licensable by - grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms - of, this License. For legal entities, "You" includes any - entity which controls, is controlled by, or is under common - control with You. For purposes of this definition, - "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by - contract or otherwise, or (b) ownership of more than fifty - percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the Initial - Developer hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, - reproduce, modify, display, perform, sublicense and - distribute the Original Software (or portions thereof), - with or without Modifications, and/or as part of a Larger - Work; and - - (b) under Patent Claims infringed by the making, using or - selling of Original Software, to make, have made, use, - practice, sell, and offer for sale, and/or otherwise - dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are - effective on the date Initial Developer first distributes - or otherwise makes the Original Software available to a - third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original - Software, or (2) for infringements caused by: (i) the - modification of the Original Software, or (ii) the - combination of the Original Software with other software - or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, - modify, display, perform, sublicense and distribute the - Modifications created by such Contributor (or portions - thereof), either on an unmodified basis, with other - Modifications, as Covered Software and/or as part of a - Larger Work; and - - (b) under Patent Claims infringed by the making, using, or - selling of Modifications made by that Contributor either - alone and/or in combination with its Contributor Version - (or portions of such combination), to make, use, sell, - offer for sale, have made, and/or otherwise dispose of: - (1) Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions - of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are - effective on the date Contributor first distributes or - otherwise makes the Modifications available to a third - party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted - from the Contributor Version; (2) for infringements caused - by: (i) third party modifications of Contributor Version, - or (ii) the combination of Modifications made by that - Contributor with other software (except as part of the - Contributor Version) or other devices; or (3) under Patent - Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in Source - Code form and that Source Code form must be distributed only under - the terms of this License. You must include a copy of this - License with every copy of the Source Code form of the Covered - Software You distribute or otherwise make available. You must - inform recipients of any such Covered Software in Executable form - as to how they can obtain such Covered Software in Source Code - form in a reasonable manner on or through a medium customarily - used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or - You have sufficient rights to grant the rights conveyed by this - License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may - not remove or alter any copyright, patent or trademark notices - contained within the Covered Software, or any notices of licensing - or any descriptive text giving attribution to any Contributor or - the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version - of this License or the recipients' rights hereunder. You may - choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of - Covered Software. 
However, you may do so only on Your own behalf, - and not on behalf of the Initial Developer or any Contributor. - You must make it absolutely clear that any such warranty, support, - indemnity or liability obligation is offered by You alone, and You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial Developer or - such Contributor as a result of warranty, support, indemnity or - liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software - under the terms of this License or under the terms of a license of - Your choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the - Covered Software in Executable form under a different license, You - must make it absolutely clear that any terms which differ from - this License are offered by You alone, not by the Initial - Developer or Contributor. You hereby agree to indemnify the - Initial Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of any - such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and - distribute the Larger Work as a single product. In such a case, - You must make sure the requirements of this License are fulfilled - for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and may - publish revised and/or new versions of this License from time to - time. Each version will be given a distinguishing version number. - Except as provided in Section 4.3, no one other than the license - steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. - If the Initial Developer includes a notice in the Original - Software prohibiting it from being distributed or otherwise made - available under any subsequent version of the License, You must - distribute and make the Covered Software available under the terms - of the version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to use, - distribute or otherwise make the Covered Software available under - the terms of any subsequent version of the License published by - the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license - and remove any references to the name of the license steward - (except to note that the license differs from this License); and - (b) otherwise make it clear that the license contains terms which - differ from this License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY - NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond - the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that - the Participant Software (meaning the Contributor Version where - the Participant is a Contributor or the Original Software where - the Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if - the Initial Developer is not the Participant) and all Contributors - under Sections 2.1 and/or 2.2 of this License shall, upon 60 days - notice from Participant terminate prospectively and automatically - at the expiration of such 60 day notice period, unless if within - such 60 day period You withdraw Your claim with respect to the - Participant Software against such Participant either unilaterally - or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial - computer software" (as that term is defined at 48 - C.F.R. 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 - (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 - C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all - U.S. Government End Users acquire Covered Software with only those - rights set forth herein. This U.S. Government Rights clause is in - lieu of, and supersedes, any other FAR, DFAR, or other clause or - provision that addresses Government rights in computer software - under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed - by the law of the jurisdiction specified in a notice contained - within the Original Software (except to the extent applicable law, - if any, provides otherwise), excluding such jurisdiction's - conflict-of-law provisions. Any litigation relating to this - License shall be subject to the jurisdiction of the courts located - in the jurisdiction and venue specified in a notice contained - within the Original Software, with the losing party responsible - for costs, including, without limitation, court costs and - reasonable attorneys' fees and expenses. The application of the - United Nations Convention on Contracts for the International Sale - of Goods is expressly excluded. Any law or regulation which - provides that the language of a contract shall be construed - against the drafter shall not apply to this License. You agree - that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - --------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND -DISTRIBUTION LICENSE (CDDL) - -For Covered Software in this distribution, this License shall -be governed by the laws of the State of California (excluding -conflict-of-law provisions). - -Any litigation relating to this License shall be subject to the -jurisdiction of the Federal Courts of the Northern District of -California and the state courts of the State of California, with -venue lying in Santa Clara County, California. 
diff --git a/vendor/github.com/howeyc/gopass/README.md b/vendor/github.com/howeyc/gopass/README.md deleted file mode 100644 index 2d6a4e72c9..0000000000 --- a/vendor/github.com/howeyc/gopass/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# getpasswd in Go [![GoDoc](https://godoc.org/github.com/howeyc/gopass?status.svg)](https://godoc.org/github.com/howeyc/gopass) [![Build Status](https://secure.travis-ci.org/howeyc/gopass.png?branch=master)](http://travis-ci.org/howeyc/gopass) - -Retrieve password from user terminal or piped input without echo. - -Verified on BSD, Linux, and Windows. - -Example: -```go -package main - -import "fmt" -import "github.com/howeyc/gopass" - -func main() { - fmt.Printf("Password: ") - - // Silent. For printing *'s use gopass.GetPasswdMasked() - pass, err := gopass.GetPasswd() - if err != nil { - // Handle gopass.ErrInterrupted or getch() read error - } - - // Do something with pass -} -``` - -Caution: Multi-byte characters not supported! diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go deleted file mode 100644 index f5bd5a51a8..0000000000 --- a/vendor/github.com/howeyc/gopass/pass.go +++ /dev/null @@ -1,110 +0,0 @@ -package gopass - -import ( - "errors" - "fmt" - "io" - "os" -) - -type FdReader interface { - io.Reader - Fd() uintptr -} - -var defaultGetCh = func(r io.Reader) (byte, error) { - buf := make([]byte, 1) - if n, err := r.Read(buf); n == 0 || err != nil { - if err != nil { - return 0, err - } - return 0, io.EOF - } - return buf[0], nil -} - -var ( - maxLength = 512 - ErrInterrupted = errors.New("interrupted") - ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength) - - // Provide variable so that tests can provide a mock implementation. - getch = defaultGetCh -) - -// getPasswd returns the input read from terminal. -// If prompt is not empty, it will be output as a prompt to the user -// If masked is true, typing will be matched by asterisks on the screen. -// Otherwise, typing will echo nothing. -func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) { - var err error - var pass, bs, mask []byte - if masked { - bs = []byte("\b \b") - mask = []byte("*") - } - - if isTerminal(r.Fd()) { - if oldState, err := makeRaw(r.Fd()); err != nil { - return pass, err - } else { - defer func() { - restore(r.Fd(), oldState) - fmt.Fprintln(w) - }() - } - } - - if prompt != "" { - fmt.Fprint(w, prompt) - } - - // Track total bytes read, not just bytes in the password. This ensures any - // errors that might flood the console with nil or -1 bytes infinitely are - // capped. - var counter int - for counter = 0; counter <= maxLength; counter++ { - if v, e := getch(r); e != nil { - err = e - break - } else if v == 127 || v == 8 { - if l := len(pass); l > 0 { - pass = pass[:l-1] - fmt.Fprint(w, string(bs)) - } - } else if v == 13 || v == 10 { - break - } else if v == 3 { - err = ErrInterrupted - break - } else if v != 0 { - pass = append(pass, v) - fmt.Fprint(w, string(mask)) - } - } - - if counter > maxLength { - err = ErrMaxLengthExceeded - } - - return pass, err -} - -// GetPasswd returns the password read from the terminal without echoing input. -// The returned byte array does not include end-of-line characters. -func GetPasswd() ([]byte, error) { - return getPasswd("", false, os.Stdin, os.Stdout) -} - -// GetPasswdMasked returns the password read from the terminal, echoing asterisks. 
-// The returned byte array does not include end-of-line characters. -func GetPasswdMasked() ([]byte, error) { - return getPasswd("", true, os.Stdin, os.Stdout) -} - -// GetPasswdPrompt prompts the user and returns the password read from the terminal. -// If mask is true, then asterisks are echoed. -// The returned byte array does not include end-of-line characters. -func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) { - return getPasswd(prompt, mask, r, w) -} diff --git a/vendor/github.com/howeyc/gopass/pass_test.go b/vendor/github.com/howeyc/gopass/pass_test.go deleted file mode 100644 index 7ac3151357..0000000000 --- a/vendor/github.com/howeyc/gopass/pass_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package gopass - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "testing" - "time" -) - -// TestGetPasswd tests the password creation and output based on a byte buffer -// as input to mock the underlying getch() methods. -func TestGetPasswd(t *testing.T) { - type testData struct { - input []byte - - // Due to how backspaces are written, it is easier to manually write - // each expected output for the masked cases. - masked string - password string - byesLeft int - reason string - } - - ds := []testData{ - testData{[]byte("abc\n"), "***", "abc", 0, "Password parsing should stop at \\n"}, - testData{[]byte("abc\r"), "***", "abc", 0, "Password parsing should stop at \\r"}, - testData{[]byte("a\nbc\n"), "*", "a", 3, "Password parsing should stop at \\n"}, - testData{[]byte("*!]|\n"), "****", "*!]|", 0, "Special characters shouldn't affect the password."}, - - testData{[]byte("abc\r\n"), "***", "abc", 1, - "Password parsing should stop at \\r; Windows LINE_MODE should be unset so \\r is not converted to \\r\\n."}, - - testData{[]byte{'a', 'b', 'c', 8, '\n'}, "***\b \b", "ab", 0, "Backspace byte should remove the last read byte."}, - testData{[]byte{'a', 'b', 127, 'c', '\n'}, "**\b \b*", "ac", 0, "Delete byte should remove the last read byte."}, - testData{[]byte{'a', 'b', 127, 'c', 8, 127, '\n'}, "**\b \b*\b \b\b \b", "", 0, "Successive deletes continue to delete."}, - testData{[]byte{8, 8, 8, '\n'}, "", "", 0, "Deletes before characters are noops."}, - testData{[]byte{8, 8, 8, 'a', 'b', 'c', '\n'}, "***", "abc", 0, "Deletes before characters are noops."}, - - testData{[]byte{'a', 'b', 0, 'c', '\n'}, "***", "abc", 0, - "Nil byte should be ignored due; may get unintended nil bytes from syscalls on Windows."}, - } - - // Redirecting output for tests as they print to os.Stdout but we want to - // capture and test the output. - for _, masked := range []bool{true, false} { - for _, d := range ds { - pipeBytesToStdin(d.input) - - r, w, err := os.Pipe() - if err != nil { - t.Fatal(err.Error()) - } - - result, err := getPasswd("", masked, os.Stdin, w) - if err != nil { - t.Errorf("Error getting password: %s", err.Error()) - } - leftOnBuffer := flushStdin() - - // Test output (masked and unmasked). Delete/backspace actually - // deletes, overwrites and deletes again. As a result, we need to - // remove those from the pipe afterwards to mimic the console's - // interpretation of those bytes. 
- w.Close() - output, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err.Error()) - } - var expectedOutput []byte - if masked { - expectedOutput = []byte(d.masked) - } else { - expectedOutput = []byte("") - } - if bytes.Compare(expectedOutput, output) != 0 { - t.Errorf("Expected output to equal %v (%q) but got %v (%q) instead when masked=%v. %s", expectedOutput, string(expectedOutput), output, string(output), masked, d.reason) - } - - if string(result) != d.password { - t.Errorf("Expected %q but got %q instead when masked=%v. %s", d.password, result, masked, d.reason) - } - - if leftOnBuffer != d.byesLeft { - t.Errorf("Expected %v bytes left on buffer but instead got %v when masked=%v. %s", d.byesLeft, leftOnBuffer, masked, d.reason) - } - } - } -} - -// TestPipe ensures we get our expected pipe behavior. -func TestPipe(t *testing.T) { - type testData struct { - input string - password string - expError error - } - ds := []testData{ - testData{"abc", "abc", io.EOF}, - testData{"abc\n", "abc", nil}, - testData{"abc\r", "abc", nil}, - testData{"abc\r\n", "abc", nil}, - } - - for _, d := range ds { - _, err := pipeToStdin(d.input) - if err != nil { - t.Log("Error writing input to stdin:", err) - t.FailNow() - } - pass, err := GetPasswd() - if string(pass) != d.password { - t.Errorf("Expected %q but got %q instead.", d.password, string(pass)) - } - if err != d.expError { - t.Errorf("Expected %v but got %q instead.", d.expError, err) - } - } -} - -// flushStdin reads from stdin for .5 seconds to ensure no bytes are left on -// the buffer. Returns the number of bytes read. -func flushStdin() int { - ch := make(chan byte) - go func(ch chan byte) { - reader := bufio.NewReader(os.Stdin) - for { - b, err := reader.ReadByte() - if err != nil { // Maybe log non io.EOF errors, if you want - close(ch) - return - } - ch <- b - } - close(ch) - }(ch) - - numBytes := 0 - for { - select { - case _, ok := <-ch: - if !ok { - return numBytes - } - numBytes++ - case <-time.After(500 * time.Millisecond): - return numBytes - } - } - return numBytes -} - -// pipeToStdin pipes the given string onto os.Stdin by replacing it with an -// os.Pipe. The write end of the pipe is closed so that EOF is read after the -// final byte. 
-func pipeToStdin(s string) (int, error) { - pipeReader, pipeWriter, err := os.Pipe() - if err != nil { - fmt.Println("Error getting os pipes:", err) - os.Exit(1) - } - os.Stdin = pipeReader - w, err := pipeWriter.WriteString(s) - pipeWriter.Close() - return w, err -} - -func pipeBytesToStdin(b []byte) (int, error) { - return pipeToStdin(string(b)) -} - -// TestGetPasswd_Err tests errors are properly handled from getch() -func TestGetPasswd_Err(t *testing.T) { - var inBuffer *bytes.Buffer - getch = func(io.Reader) (byte, error) { - b, err := inBuffer.ReadByte() - if err != nil { - return 13, err - } - if b == 'z' { - return 'z', fmt.Errorf("Forced error; byte returned should not be considered accurate.") - } - return b, nil - } - defer func() { getch = defaultGetCh }() - - for input, expectedPassword := range map[string]string{"abc": "abc", "abzc": "ab"} { - inBuffer = bytes.NewBufferString(input) - p, err := GetPasswdMasked() - if string(p) != expectedPassword { - t.Errorf("Expected %q but got %q instead.", expectedPassword, p) - } - if err == nil { - t.Errorf("Expected error to be returned.") - } - } -} - -func TestMaxPasswordLength(t *testing.T) { - type testData struct { - input []byte - expectedErr error - - // Helper field to output in case of failure; rather than hundreds of - // bytes. - inputDesc string - } - - ds := []testData{ - testData{append(bytes.Repeat([]byte{'a'}, maxLength), '\n'), nil, fmt.Sprintf("%v 'a' bytes followed by a newline", maxLength)}, - testData{append(bytes.Repeat([]byte{'a'}, maxLength+1), '\n'), ErrMaxLengthExceeded, fmt.Sprintf("%v 'a' bytes followed by a newline", maxLength+1)}, - testData{append(bytes.Repeat([]byte{0x00}, maxLength+1), '\n'), ErrMaxLengthExceeded, fmt.Sprintf("%v 0x00 bytes followed by a newline", maxLength+1)}, - } - - for _, d := range ds { - pipeBytesToStdin(d.input) - _, err := GetPasswd() - if err != d.expectedErr { - t.Errorf("Expected error to be %v; isntead got %v from %v", d.expectedErr, err, d.inputDesc) - } - } -} diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go deleted file mode 100644 index 0835641462..0000000000 --- a/vendor/github.com/howeyc/gopass/terminal.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !solaris - -package gopass - -import "golang.org/x/crypto/ssh/terminal" - -type terminalState struct { - state *terminal.State -} - -func isTerminal(fd uintptr) bool { - return terminal.IsTerminal(int(fd)) -} - -func makeRaw(fd uintptr) (*terminalState, error) { - state, err := terminal.MakeRaw(int(fd)) - - return &terminalState{ - state: state, - }, err -} - -func restore(fd uintptr, oldState *terminalState) error { - return terminal.Restore(int(fd), oldState.state) -} diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go deleted file mode 100644 index 257e1b4e81..0000000000 --- a/vendor/github.com/howeyc/gopass/terminal_solaris.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License, Version 1.0 only - * (the "License"). You may not use this file except in compliance - * with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or http://www.opensolaris.org/os/licensing. - * See the License for the specific language governing permissions - * and limitations under the License. 
- * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -// Below is derived from Solaris source, so CDDL license is included. - -package gopass - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -type terminalState struct { - state *unix.Termios -} - -// isTerminal returns true if there is a terminal attached to the given -// file descriptor. -// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func isTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} - -// makeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c -func makeRaw(fd uintptr) (*terminalState, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - if err != nil { - return nil, err - } - oldTermios := *oldTermiosPtr - - newTermios := oldTermios - newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL - if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil { - return nil, err - } - - return &terminalState{ - state: oldTermiosPtr, - }, nil -} - -func restore(fd uintptr, oldState *terminalState) error { - return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state) -} diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore new file mode 100644 index 0000000000..6d9953c3c6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.gitignore @@ -0,0 +1,3 @@ +.test +.go + diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml new file mode 100644 index 0000000000..9cf8bb7fc5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.travis.yml @@ -0,0 +1,18 @@ +language: go + +script: + - go test -race -v ./... 
+ + go: + - 1.4 + - 1.5 + - 1.6 + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/fsnotify.v1 + - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md new file mode 100644 index 0000000000..422790c073 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/CHANGES.md @@ -0,0 +1,63 @@ +# API v1 (gopkg.in/hpcloud/tail.v1) + +## April, 2016 + +* Migrated to godep, as depman is no longer supported +* Introduced the Go vendoring feature +* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopening deleted files + +## July, 2015 + +* Fix inotify watcher leak; remove `Cleanup` (#51) + +# API v0 (gopkg.in/hpcloud/tail.v0) + +## June, 2015 + +* Don't return partial lines (PR #40) +* Use stable version of fsnotify (#46) + +## July, 2014 + +* Fix tail for Windows (PR #36) + +## May, 2014 + +* Improved rate limiting using leaky bucket (PR #29) +* Fix odd line splitting (PR #30) + +## Apr, 2014 + +* LimitRate now discards read buffer (PR #28) +* Allow reading of longer lines if MaxLineSize is unset (PR #24) +* Updated deps.json to latest fsnotify (441bbc86b1) + +## Feb, 2014 + +* Added `Config.Logger` to suppress library logging + +## Nov, 2013 + +* Add Cleanup to remove leaky inotify watches (PR #20) + +## Aug, 2013 + +* Redesigned Location field (PR #12) +* Add tail.Tell (PR #14) + +## July, 2013 + +* Rate limiting (PR #10) + +## May, 2013 + +* Detect file deletions/renames in polling file watcher (PR #1) +* Detect file truncation +* Fix potential race condition when reopening the file (issue 5) +* Fix potential blocking of `tail.Stop` (issue 4) +* Fix uncleaned-up ChangeEvents goroutines after calling tail.Stop +* Support Follow=false + +## Feb, 2013 + +* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile new file mode 100644 index 0000000000..cd297b940a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ +ADD . $GOPATH/src/github.com/hpcloud/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/hpcloud/tail + +# expecting to run the test successfully.
+RUN go test -v github.com/hpcloud/tail + +# expecting to install successfully +RUN go install -v github.com/hpcloud/tail +RUN go install -v github.com/hpcloud/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 0000000000..818d802a59 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile new file mode 100644 index 0000000000..6591b24fc1 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Makefile @@ -0,0 +1,11 @@ +default: test + +test: *.go + go test -v -race ./... + +fmt: + gofmt -w . + +# Run the test in an isolated environment. +fulltest: + docker build -t hpcloud/tail . diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md new file mode 100644 index 0000000000..fb7fbc26c6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/README.md @@ -0,0 +1,28 @@ +[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) +[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/hpcloud/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/hpcloud/tail/... + +## Windows support + +This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. 
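As a slightly fuller sketch of the API this diff vendors in (the log path, config values, and error handling below are illustrative assumptions, not part of the upstream README):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow the file like `tail -F`: keep reading new lines, and reopen
	// the file if it is rotated away and recreated.
	t, err := tail.TailFile("/var/log/nginx.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Lines is closed when tailing stops; Wait reports the terminal error.
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	if err := t.Wait(); err != nil {
		log.Fatal(err)
	}
}
```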
diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml new file mode 100644 index 0000000000..d370055b6f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... +test: off +clone_folder: c:\workspace\src\github.com\hpcloud\tail +branches: + only: + - master diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence new file mode 100644 index 0000000000..434aab19f1 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence @@ -0,0 +1,7 @@ +Copyright (C) 2013 99designs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go new file mode 100644 index 0000000000..358b69e7f5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. 
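+//
+// A rough sketch of the semantics, using only the names defined below: a
+// bucket of Size n with LeakInterval d holds at most n units and leaks one
+// unit every d, so a bucket created with NewLeakyBucket(10, 100*time.Millisecond)
+// admits bursts of up to 10 units and a sustained rate of 10 units per second:
+//
+//	bucket := NewLeakyBucket(10, 100*time.Millisecond)
+//	if bucket.Pour(1) {
+//		// the unit fit within the rate limit
+//	}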
+package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go new file mode 100644 index 0000000000..8f6a5784a9 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go @@ -0,0 +1,58 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const GC_SIZE int = 100 + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { + + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() > now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go new file mode 100644 index 0000000000..89b2fe8826 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + 
GetBucketFor(string) (*LeakyBucket, error) + SetBucketFor(string, LeakyBucket) error +} diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go new file mode 100644 index 0000000000..2d252d6057 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail.go @@ -0,0 +1,438 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package tail + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/hpcloud/tail/ratelimiter" + "github.com/hpcloud/tail/util" + "github.com/hpcloud/tail/watch" + "gopkg.in/tomb.v1" +) + +var ( + ErrStop = fmt.Errorf("tail should now stop") +) + +type Line struct { + Text string + Time time.Time + Err error // Error from tail +} + +// NewLine returns a Line with the present time. +func NewLine(text string) *Line { + return &Line{text, time.Now(), nil} +} + +// SeekInfo represents arguments to `os.Seek` +type SeekInfo struct { + Offset int64 + Whence int // os.SEEK_* +} + +type logger interface { + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + Fatalln(v ...interface{}) + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) + Panicln(v ...interface{}) + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// Config is used to specify how a file must be tailed. +type Config struct { + // File-specific + Location *SeekInfo // Seek to this location before tailing + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using inotify + Pipe bool // Is a named pipe (mkfifo) + RateLimiter *ratelimiter.LeakyBucket + + // Generic IO + Follow bool // Continue looking for new lines (tail -f) + MaxLineSize int // If non-zero, split longer lines into multiple lines + + // Logger, when nil, is set to tail.DefaultLogger + // To disable logging: set field to tail.DiscardingLogger + Logger logger +} + +type Tail struct { + Filename string + Lines chan *Line + Config + + file *os.File + reader *bufio.Reader + + watcher watch.FileWatcher + changes *watch.FileChanges + + tomb.Tomb // provides: Done, Kill, Dying + + lk sync.Mutex +} + +var ( + // DefaultLogger is used when Config.Logger == nil + DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) + // DiscardingLogger can be used to disable logging output + DiscardingLogger = log.New(ioutil.Discard, "", 0) +) + +// TailFile begins tailing the file. Output stream is made available +// via the `Tail.Lines` channel. To handle errors during tailing, +// invoke the `Wait` or `Err` method after finishing reading from the +// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) { + if config.ReOpen && !config.Follow { + util.Fatal("cannot set ReOpen without Follow.") + } + + t := &Tail{ + Filename: filename, + Lines: make(chan *Line), + Config: config, + } + + // when Logger was not specified in config, use default logger + if t.Logger == nil { + t.Logger = log.New(os.Stderr, "", log.LstdFlags) + } + + if t.Poll { + t.watcher = watch.NewPollingFileWatcher(filename) + } else { + t.watcher = watch.NewInotifyFileWatcher(filename) + } + + if t.MustExist { + var err error + t.file, err = OpenFile(t.Filename) + if err != nil { + return nil, err + } + } + + go t.tailFileSync() + + return t, nil +} + +// Tell returns the file's current position, like stdio's ftell(). +// The value is not exact: one line may already have been read into the +// Lines channel ahead of the caller, so the reported offset can be off +// by one line. +func (tail *Tail) Tell() (offset int64, err error) { + if tail.file == nil { + return + } + offset, err = tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return + } + + tail.lk.Lock() + defer tail.lk.Unlock() + if tail.reader == nil { + return + } + + offset -= int64(tail.reader.Buffered()) + return +} + +// Stop stops the tailing activity. +func (tail *Tail) Stop() error { + tail.Kill(nil) + return tail.Wait() +} + +// StopAtEOF stops tailing as soon as the end of the file is reached. +func (tail *Tail) StopAtEOF() error { + tail.Kill(errStopAtEOF) + return tail.Wait() +} + +var errStopAtEOF = errors.New("tail: stop at eof") + +func (tail *Tail) close() { + close(tail.Lines) + tail.closeFile() +} + +func (tail *Tail) closeFile() { + if tail.file != nil { + tail.file.Close() + tail.file = nil + } +} + +func (tail *Tail) reopen() error { + tail.closeFile() + for { + var err error + tail.file, err = OpenFile(tail.Filename) + if err != nil { + if os.IsNotExist(err) { + tail.Logger.Printf("Waiting for %s to appear...", tail.Filename) + if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil { + if err == tomb.ErrDying { + return err + } + return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err) + } + continue + } + return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err) + } + break + } + return nil +} + +func (tail *Tail) readLine() (string, error) { + tail.lk.Lock() + line, err := tail.reader.ReadString('\n') + tail.lk.Unlock() + if err != nil { + // Note ReadString "returns the data read before the error" in + // case of an error, including EOF, so we return it as is. The + // caller is expected to process it if err is EOF. + return line, err + } + + line = strings.TrimRight(line, "\n") + + return line, err +} + +func (tail *Tail) tailFileSync() { + defer tail.Done() + defer tail.close() + + if !tail.MustExist { + // deferred first open. + err := tail.reopen() + if err != nil { + if err != tomb.ErrDying { + tail.Kill(err) + } + return + } + } + + // Seek to requested location on first open of the file. + if tail.Location != nil { + _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) + tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location) + if err != nil { + tail.Killf("Seek error on %s: %s", tail.Filename, err) + return + } + } + + tail.openReader() + + var offset int64 = 0 + var err error + + // Read line by line.
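+	// Each pass: remember the current offset (skipped for named pipes) so a
+	// partially read line can be re-read after EOF, read one line, deliver
+	// it subject to the rate limiter, and on EOF wait for the watcher to
+	// report changes before trying again.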
+ for { + // do not seek in named pipes + if !tail.Pipe { + // grab the position in case we need to back up in the event of a half-line + offset, err = tail.Tell() + if err != nil { + tail.Kill(err) + return + } + } + + line, err := tail.readLine() + + // Process `line` even if err is EOF. + if err == nil { + cooloff := !tail.sendLine(line) + if cooloff { + // Wait a second before seeking till the end of + // file when rate limit is reached. + msg := fmt.Sprintf( + "Too much log activity; waiting a second " + + "before resuming tailing") + tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + select { + case <-time.After(time.Second): + case <-tail.Dying(): + return + } + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + } else if err == io.EOF { + if !tail.Follow { + if line != "" { + tail.sendLine(line) + } + return + } + + if tail.Follow && line != "" { + // this has the potential to never return the last line if + // it's not followed by a newline; seems a fair trade here + err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) + if err != nil { + tail.Kill(err) + return + } + } + + // When EOF is reached, wait for more data to become + // available. Wait strategy is based on the `tail.watcher` + // implementation (inotify or polling). + err := tail.waitForChanges() + if err != nil { + if err != ErrStop { + tail.Kill(err) + } + return + } + } else { + // non-EOF error + tail.Killf("Error reading %s: %s", tail.Filename, err) + return + } + + select { + case <-tail.Dying(): + if tail.Err() == errStopAtEOF { + continue + } + return + default: + } + } +} + +// waitForChanges waits until the file has been appended, deleted, +// moved or truncated. When moved or deleted - the file will be +// reopened if ReOpen is true. Truncated files are always reopened. +func (tail *Tail) waitForChanges() error { + if tail.changes == nil { + pos, err := tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) + if err != nil { + return err + } + } + + select { + case <-tail.changes.Modified: + return nil + case <-tail.changes.Deleted: + tail.changes = nil + if tail.ReOpen { + // XXX: we must not log from a library. 
+ tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened %s", tail.Filename) + tail.openReader() + return nil + } else { + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop + } + case <-tail.changes.Truncated: + // Always reopen truncated files (Follow is true) + tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) + tail.openReader() + return nil + case <-tail.Dying(): + return ErrStop + } + panic("unreachable") +} + +func (tail *Tail) openReader() { + if tail.MaxLineSize > 0 { + // add 2 to account for newline characters + tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) + } else { + tail.reader = bufio.NewReader(tail.file) + } +} + +func (tail *Tail) seekEnd() error { + return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) +} + +func (tail *Tail) seekTo(pos SeekInfo) error { + _, err := tail.file.Seek(pos.Offset, pos.Whence) + if err != nil { + return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) + } + // Reset the read buffer whenever the file is re-seek'ed + tail.reader.Reset(tail.file) + return nil +} + +// sendLine sends the line(s) to Lines channel, splitting longer lines +// if necessary. Return false if rate limit is reached. +func (tail *Tail) sendLine(line string) bool { + now := time.Now() + lines := []string{line} + + // Split longer lines + if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { + lines = util.PartitionString(line, tail.MaxLineSize) + } + + for _, line := range lines { + tail.Lines <- &Line{line, now, nil} + } + + if tail.Config.RateLimiter != nil { + ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) + if !ok { + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Filename) + return false + } + } + + return true +} + +// Cleanup removes inotify watches added by the tail package. This function is +// meant to be invoked from a process's exit handler. Linux kernel may not +// automatically remove inotify watches after the process exits. 
+func (tail *Tail) Cleanup() { + watch.Cleanup(tail.Filename) +} diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go new file mode 100644 index 0000000000..bc4dc3357a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_posix.go @@ -0,0 +1,11 @@ +// +build linux darwin freebsd netbsd openbsd + +package tail + +import ( + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go new file mode 100644 index 0000000000..ef2cfca1b7 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package tail + +import ( + "github.com/hpcloud/tail/winfile" + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go new file mode 100644 index 0000000000..54151fe39f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// Fatal is like panic except it displays only the current goroutine's stack. +func Fatal(format string, v ...interface{}) { + // https://github.com/hpcloud/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// PartitionString partitions the string into chunks of given size, +// with the last chunk of variable size. +func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 0000000000..3ce5dcecbb --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool), make(chan bool), make(chan bool)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one).
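+//
+// The channels above are unbuffered, so the non-blocking send below succeeds
+// only while some goroutine is blocked receiving; otherwise the notification
+// is dropped. A hypothetical illustration (not part of the original source):
+//
+//	fc := NewFileChanges()
+//	go func() { <-fc.Modified }() // consumer waiting for a notification
+//	fc.NotifyModified()           // delivered to the waiting consumer
+//	fc.NotifyModified()           // dropped if nobody is currently waiting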
+func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 0000000000..4478f1e1a0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. + return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + defer RemoveWatch(fw.Filename) + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + return + } + case <-t.Dying(): + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + changes.NotifyDeleted() + return + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 0000000000..03be4275ca --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,260 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+ +package watch + +import ( + "log" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" +) + +type InotifyTracker struct { + mux sync.Mutex + watcher *fsnotify.Watcher + chans map[string]chan fsnotify.Event + done map[string]chan bool + watchNums map[string]int + watch chan *watchInfo + remove chan *watchInfo + error chan error +} + +type watchInfo struct { + op fsnotify.Op + fname string +} + +func (this *watchInfo) isCreate() bool { + return this.op == fsnotify.Create +} + +var ( + // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used + shared *InotifyTracker + + // these are used to ensure the shared InotifyTracker is run exactly once + once = sync.Once{} + goRun = func() { + shared = &InotifyTracker{ + mux: sync.Mutex{}, + chans: make(map[string]chan fsnotify.Event), + done: make(map[string]chan bool), + watchNums: make(map[string]int), + watch: make(chan *watchInfo), + remove: make(chan *watchInfo), + error: make(chan error), + } + go shared.run() + } + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +// Watch signals the run goroutine to begin watching the input filename +func Watch(fname string) error { + return watch(&watchInfo{ + fname: fname, + }) +} + +// WatchCreate signals the run goroutine to begin watching for the creation of the input filename. +// If you use WatchCreate, don't call Cleanup; call RemoveWatchCreate instead. +func WatchCreate(fname string) error { + return watch(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func watch(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.watch <- winfo + return <-shared.error +} + +// RemoveWatch signals the run goroutine to remove the watch for the input filename +func RemoveWatch(fname string) { + remove(&watchInfo{ + fname: fname, + }) +} + +// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename +func RemoveWatchCreate(fname string) { + remove(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func remove(winfo *watchInfo) { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.mux.Lock() + done := shared.done[winfo.fname] + if done != nil { + delete(shared.done, winfo.fname) + close(done) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() + + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + shared.watcher.Remove(fname) + } + shared.remove <- winfo +} + +// Events returns a channel to which FileEvents corresponding to the input filename +// will be sent. This channel will be closed when removeWatch is called on this +// filename. +func Events(fname string) <-chan fsnotify.Event { + shared.mux.Lock() + defer shared.mux.Unlock() + + return shared.chans[fname] +} + +// Cleanup removes the watch for the input filename if necessary.
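+// It simply delegates to RemoveWatch; Tail.Cleanup (in tail.go above) calls it.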
+func Cleanup(fname string) { + RemoveWatch(fname) +} + +// addWatch adds an fsnotify watch for the input filename (or for its parent +// directory, for creation watches), tracking how many subscribers each watch has. +func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { + shared.mux.Lock() + defer shared.mux.Unlock() + + if shared.chans[winfo.fname] == nil { + shared.chans[winfo.fname] = make(chan fsnotify.Event) + shared.done[winfo.fname] = make(chan bool) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + + // already in inotify watch + if shared.watchNums[fname] > 0 { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + return nil + } + + err := shared.watcher.Add(fname) + if err == nil { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + } + return err +} + +// removeWatch closes the events channel corresponding to the input filename +// and updates the watch bookkeeping. +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) { + shared.mux.Lock() + defer shared.mux.Unlock() + + ch := shared.chans[winfo.fname] + if ch == nil { + return + } + + delete(shared.chans, winfo.fname) + close(ch) + + if !winfo.isCreate() { + return + } + + shared.watchNums[winfo.fname]-- + if shared.watchNums[winfo.fname] == 0 { + delete(shared.watchNums, winfo.fname) + } +} + +// sendEvent sends the input event to the appropriate Tail. +func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { + name := filepath.Clean(event.Name) + + shared.mux.Lock() + ch := shared.chans[name] + done := shared.done[name] + shared.mux.Unlock() + + if ch != nil && done != nil { + select { + case ch <- event: + case <-done: + } + } +} + +// run starts the goroutine in which the shared struct reads events from its +// Watcher's Event channel and sends the events to the appropriate Tail. +func (shared *InotifyTracker) run() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + util.Fatal("failed to create Watcher") + } + shared.watcher = watcher + + for { + select { + case winfo := <-shared.watch: + shared.error <- shared.addWatch(winfo) + + case winfo := <-shared.remove: + shared.removeWatch(winfo) + + case event, open := <-shared.watcher.Events: + if !open { + return + } + shared.sendEvent(event) + + case err, open := <-shared.watcher.Errors: + if !open { + return + } else if err != nil { + sysErr, ok := err.(*os.SyscallError) + if !ok || sysErr.Err != syscall.EINTR { + logger.Printf("Error in Watcher Error channel: %s", err) + } + } + } + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go new file mode 100644 index 0000000000..49491f21db --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/polling.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "os" + "runtime" + "time" + + "github.com/hpcloud/tail/util" + "gopkg.in/tomb.v1" +) + +// PollingFileWatcher polls the file for changes.
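+// It is typically used where inotify is unavailable, checking the file every
+// POLL_DURATION (250ms by default; see init below).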
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. Replace + // the Fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 0000000000..2e1783ef0a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it a modification, + // deletion, rename or truncation. The returned FileChanges group of + // channels will be closed, and thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file.
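+	// Both InotifyFileWatcher and PollingFileWatcher above implement this behavior.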
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go new file mode 100644 index 0000000000..aa7e7bc5df --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). 
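+	// At this point o carries the permission bits plus any setuid/setgid/sticky flags mapped above.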
+ return +} diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 0000000000..529c3412ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index 9d91c6339f..b13a50ed1f 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,2 +1,7 @@ language: go -install: go get -t +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index b131069799..02fc81e062 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -2,17 +2,18 @@ A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. - -![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. ## Status -It is ready for production use. It works fine after extensive use in the wild. +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -[![Build Status][1]][2] [![GoDoc][3]][4] [![GoCard][5]][6] +[![Build Status][1]][2] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -20,19 +21,54 @@ It is ready for production use. It works fine after extensive use in the wild. [4]: https://godoc.org/github.com/imdario/mergo [5]: https://goreportcard.com/badge/imdario/mergo [6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge + +### Latest release + +[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). ### Important note -Mergo is intended to assign **only** zero value fields on destination with source value. 
Since April 6th it works like this. Before that it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected. +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + ### Mergo in the wild -- [docker/docker](https://github.com/docker/docker/) +- [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) - [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](https://github.com/supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) - [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) - [EagerIO/Stout](https://github.com/EagerIO/Stout) - [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) - [russross/canvasassignments](https://github.com/russross/canvasassignments) @@ -50,7 +86,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor - [thoas/picfit](https://github.com/thoas/picfit) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) -[Iris Web Framework](https://github.com/kataras/iris) +- 
[bukalapak/snowboard](https://github.com/bukalapak/snowboard) ## Installation @@ -63,7 +99,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported one. It won't merge empty struct values, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also, maps will be merged recursively, except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -71,15 +107,15 @@ if err := mergo.Merge(&dst, src); err != nil { } ``` -Also, you can merge overwriting values using MergeWithOverwrite. +Also, you can merge overwriting values using the option `WithOverride`. ```go -if err := mergo.MergeWithOverwrite(&dst, src); err != nil { +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { // ... } ``` -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. +Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. ```go if err := mergo.Map(&dst, srcMap); err != nil { @@ -87,7 +123,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { } ``` -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. +Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will just be assigned as values. More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). @@ -111,13 +147,10 @@ func main() { A: "one", B: 2, } - dest := Foo{ A: "two", } - mergo.Merge(&dest, src) - fmt.Println(dest) // Will print // {two 2} @@ -126,7 +159,56 @@ func main() { Note: if tests are failing due to a missing package, please execute: - go get gopkg.in/yaml.v1 + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow merging specific types differently from the default behavior. In other words, you can now customize how some types are merged. For example, `time.Time` is a struct; it has no zero value of its own, but `IsZero` can return true because its fields can all hold their zero values. How can we merge a non-zero `time.Time`?
+```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + ## Contact me @@ -136,6 +218,21 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). +## Top Contributors + +[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) +[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) +[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) +[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) +[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) +[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) +[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) +[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) + + ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+ + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/issue17_test.go b/vendor/github.com/imdario/mergo/issue17_test.go deleted file mode 100644 index 0ee96f3771..0000000000 --- a/vendor/github.com/imdario/mergo/issue17_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package mergo - -import ( - "encoding/json" - "testing" -) - -var ( - request = `{"timestamp":null, "name": "foo"}` - maprequest = map[string]interface{}{ - "timestamp": nil, - "name": "foo", - "newStuff": "foo", - } -) - -func TestIssue17MergeWithOverwrite(t *testing.T) { - var something map[string]interface{} - if err := json.Unmarshal([]byte(request), &something); err != nil { - t.Errorf("Error while Unmarshalling maprequest %s", err) - } - if err := MergeWithOverwrite(&something, maprequest); err != nil { - t.Errorf("Error while merging %s", err) - } -} diff --git a/vendor/github.com/imdario/mergo/issue23_test.go b/vendor/github.com/imdario/mergo/issue23_test.go deleted file mode 100644 index 9c32584131..0000000000 --- a/vendor/github.com/imdario/mergo/issue23_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package mergo - -import ( - "testing" - "time" -) - -type document struct { - Created *time.Time -} - -func TestIssue23MergeWithOverwrite(t *testing.T) { - now := time.Now() - dst := document{ - &now, - } - expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - src := document{ - &expected, - } - if err := MergeWithOverwrite(&dst, src); err != nil { - t.Errorf("Error while merging %s", err) - } - if dst.Created != src.Created { - t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created) - } -} diff --git a/vendor/github.com/imdario/mergo/issue38_test.go b/vendor/github.com/imdario/mergo/issue38_test.go deleted file mode 100644 index 286b68cb1c..0000000000 --- a/vendor/github.com/imdario/mergo/issue38_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package mergo - -import ( - "testing" - "time" -) - -type structWithoutTimePointer struct { - Created time.Time -} - -func TestIssue38Merge(t *testing.T) { - dst := structWithoutTimePointer{ - time.Now(), - } - - expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - src := structWithoutTimePointer{ - expected, - } - if err := Merge(&dst, src); err != nil { - t.Errorf("Error while merging %s", err) - } - if dst.Created == src.Created { - t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created) - } -} - -func TestIssue38MergeEmptyStruct(t *testing.T) { - dst := structWithoutTimePointer{} - - expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - src := structWithoutTimePointer{ - expected, - } - if err := Merge(&dst, src); err != nil { - t.Errorf("Error while merging %s", err) - } - if dst.Created == src.Created { - t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created) - } -} - -func TestIssue38MergeWithOverwrite(t *testing.T) { - dst := structWithoutTimePointer{ - time.Now(), - } - - expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - src := structWithoutTimePointer{ - expected, - } - if err := MergeWithOverwrite(&dst, src); err != nil { - t.Errorf("Error while merging %s", err) - } - if dst.Created != src.Created { - t.Fatalf("Created not 
merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created) - } -} diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 99002565f2..3f5afa83a1 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -31,7 +31,8 @@ func isExported(field reflect.StructField) bool { // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr @@ -71,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -97,15 +99,15 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over continue } if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else { @@ -127,28 +129,35 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over // doesn't apply if dst is a map. // This is separated method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}) error { - return _map(dst, src, false) +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) } -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by // non-empty src attribute values. -func MapWithOverwrite(dst, src interface{}) error { - return _map(dst, src, true) +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) } -func _map(dst, src interface{}, overwrite bool) error { +func _map(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. 
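+	// Mixed kinds (struct vs. map) fall through to the switch below and are handled by deepMap.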
if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } switch vSrc.Kind() { case reflect.Struct: @@ -162,5 +171,5 @@ func _map(dst, src interface{}, overwrite bool) error { default: return ErrNotSupported } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 052b9fe782..f8de6c5430 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,13 +9,14 @@ package mergo import ( + "fmt" "reflect" ) func hasExportedField(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) - if field.Anonymous { + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { exported = exported || hasExportedField(dst.Field(i)) } else { exported = exported || len(field.PkgPath) == 0 @@ -24,10 +25,25 @@ func hasExportedField(dst reflect.Value) (exported bool) { return } +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers + overwriteWithEmptyValue bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + overwriteWithEmptySrc := config.overwriteWithEmptyValue + config.overwriteWithEmptyValue = false + if !src.IsValid() { return } @@ -44,23 +60,30 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // Remember, remember... 
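+		// ...this address/type pair, so recursive and cyclic values terminate instead of looping forever.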
visited[h] = &visit{addr, typ, seen} } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + switch dst.Kind() { case reflect.Struct: if hasExportedField(dst) { for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } case reflect.Map: - if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 { + if dst.IsNil() && !src.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) - return } for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) @@ -69,7 +92,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov } dstElement := dst.MapIndex(key) switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { continue } @@ -84,36 +107,78 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov case reflect.Ptr: fallthrough case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { return } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different types (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } - if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slices with different types (%s, %s)", src.Type(), dst.Type()) + } + 
dst.Set(reflect.AppendSlice(dst, src)) + } case reflect.Ptr: fallthrough case reflect.Interface: + if src.IsNil() { + break + } if src.Kind() != reflect.Interface { if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { return } } else { @@ -121,17 +186,15 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov } break } - if src.IsNil() { - break - } else if dst.IsNil() || overwrite { + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } @@ -142,26 +205,51 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will recursively merge any exported field. -func Merge(dst, src interface{}) error { - return merge(dst, src, false) +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. -func MergeWithOverwrite(dst, src interface{}) error { - return merge(dst, src, true) +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing you to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
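+// A sketch of typical usage: mergo.Merge(&dst, src, mergo.WithOverride).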
+func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting them +func WithAppendSlice(config *Config) { + config.AppendSlice = true } -func merge(dst, src interface{}, overwrite bool) error { +func merge(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 79ccdf5cb0..a82fea2fdc 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -32,7 +32,7 @@ type visit struct { next *visit } -// From src/pkg/encoding/json. +// From src/pkg/encoding/json/encode.go. func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: @@ -45,8 +45,15 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Ptr, reflect.Func: + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: return v.IsNil() + case reflect.Invalid: + return true } return false } diff --git a/vendor/github.com/imdario/mergo/mergo_test.go b/vendor/github.com/imdario/mergo/mergo_test.go deleted file mode 100644 index e167c332ab..0000000000 --- a/vendor/github.com/imdario/mergo/mergo_test.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -package mergo - -import ( - "gopkg.in/yaml.v2" - "io/ioutil" - "reflect" - "testing" - "time" -) - -type simpleTest struct { - Value int -} - -type complexTest struct { - St simpleTest - sz int - ID string -} - -type mapTest struct { - M map[int]int -} - -type ifcTest struct { - I interface{} -} - -type moreComplextText struct { - Ct complexTest - St simpleTest - Nt simpleTest -} - -type pointerTest struct { - C *simpleTest -} - -type sliceTest struct { - S []int -} - -func TestKb(t *testing.T) { - type testStruct struct { - Name string - KeyValue map[string]interface{} - } - - akv := make(map[string]interface{}) - akv["Key1"] = "not value 1" - akv["Key2"] = "value2" - a := testStruct{} - a.Name = "A" - a.KeyValue = akv - - bkv := make(map[string]interface{}) - bkv["Key1"] = "value1" - bkv["Key3"] = "value3" - b := testStruct{} - b.Name = "B" - b.KeyValue = bkv - - ekv := make(map[string]interface{}) - ekv["Key1"] = "value1" - ekv["Key2"] = "value2" - ekv["Key3"] = "value3" - expected := testStruct{} - expected.Name = "B" - expected.KeyValue = ekv - - Merge(&b, a) - - if !reflect.DeepEqual(b, expected) { - t.Errorf("Actual: %#v did not match \nExpected: %#v", b, expected) - } -} - -func TestNil(t *testing.T) { - if err := Merge(nil, nil); err != ErrNilArguments { - t.Fail() - } -} - -func TestDifferentTypes(t *testing.T) { - a := simpleTest{42} - b := 42 - if err := Merge(&a, b); err != ErrDifferentArgumentsTypes { - t.Fail() - } -} - -func TestSimpleStruct(t *testing.T) { - a := simpleTest{} - b := simpleTest{42} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if a.Value != 42 { - t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%d)", a.Value, b.Value) - } - if !reflect.DeepEqual(a, b) { - t.FailNow() - } -} - -func TestComplexStruct(t *testing.T) { - a := complexTest{} - a.ID = "athing" - b := complexTest{simpleTest{42}, 1, "bthing"} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if a.St.Value != 42 { - t.Fatalf("b not merged in properly: a.St.Value(%d) != b.St.Value(%d)", a.St.Value, b.St.Value) - } - if a.sz == 1 { - t.Fatalf("a's private field sz not preserved from merge: a.sz(%d) == b.sz(%d)", a.sz, b.sz) - } - if a.ID == b.ID { - t.Fatalf("a's field ID merged unexpectedly: a.ID(%s) == b.ID(%s)", a.ID, b.ID) - } -} - -func TestComplexStructWithOverwrite(t *testing.T) { - a := complexTest{simpleTest{1}, 1, "do-not-overwrite-with-empty-value"} - b := complexTest{simpleTest{42}, 2, ""} - - expect := complexTest{simpleTest{42}, 1, "do-not-overwrite-with-empty-value"} - if err := MergeWithOverwrite(&a, b); err != nil { - t.FailNow() - } - - if !reflect.DeepEqual(a, expect) { - t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", a, expect) - } -} - -func TestPointerStruct(t *testing.T) { - s1 := simpleTest{} - s2 := simpleTest{19} - a := pointerTest{&s1} - b := pointerTest{&s2} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if a.C.Value != b.C.Value { - t.Fatalf("b not merged in properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) - } -} - -type embeddingStruct struct { - embeddedStruct -} - -type embeddedStruct struct { - A string -} - -func TestEmbeddedStruct(t *testing.T) { - tests := []struct { - src embeddingStruct - dst embeddingStruct - expected embeddingStruct - }{ - { - src: embeddingStruct{ - embeddedStruct{"foo"}, - }, - dst: embeddingStruct{ - embeddedStruct{""}, - }, - expected: embeddingStruct{ - embeddedStruct{"foo"}, - }, - }, - { - src: embeddingStruct{ - embeddedStruct{""}, - }, - dst: embeddingStruct{ - 
embeddedStruct{"bar"}, - }, - expected: embeddingStruct{ - embeddedStruct{"bar"}, - }, - }, - { - src: embeddingStruct{ - embeddedStruct{"foo"}, - }, - dst: embeddingStruct{ - embeddedStruct{"bar"}, - }, - expected: embeddingStruct{ - embeddedStruct{"bar"}, - }, - }, - } - - for _, test := range tests { - err := Merge(&test.dst, test.src) - if err != nil { - t.Errorf("unexpected error: %v", err) - continue - } - if !reflect.DeepEqual(test.dst, test.expected) { - t.Errorf("unexpected output\nexpected:\n%+v\nsaw:\n%+v\n", test.expected, test.dst) - } - } -} - -func TestPointerStructNil(t *testing.T) { - a := pointerTest{nil} - b := pointerTest{&simpleTest{19}} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if a.C.Value != b.C.Value { - t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) - } -} - -func TestSliceStruct(t *testing.T) { - a := sliceTest{} - b := sliceTest{[]int{1, 2, 3}} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if len(b.S) != 3 { - t.FailNow() - } - if len(a.S) != len(b.S) { - t.Fatalf("b not merged in a proper way %d != %d", len(a.S), len(b.S)) - } - - a = sliceTest{[]int{1}} - b = sliceTest{[]int{1, 2, 3}} - if err := Merge(&a, b); err != nil { - t.FailNow() - } - if len(a.S) != 1 { - t.FailNow() - } - if len(a.S) == len(b.S) { - t.Fatalf("b merged unexpectedly %d != %d", len(a.S), len(b.S)) - } -} - -func TestEmptyMaps(t *testing.T) { - a := mapTest{} - b := mapTest{ - map[int]int{}, - } - if err := Merge(&a, b); err != nil { - t.Fail() - } - if !reflect.DeepEqual(a, b) { - t.FailNow() - } -} - -func TestEmptyToEmptyMaps(t *testing.T) { - a := mapTest{} - b := mapTest{} - if err := Merge(&a, b); err != nil { - t.Fail() - } - if !reflect.DeepEqual(a, b) { - t.FailNow() - } -} - -func TestEmptyToNotEmptyMaps(t *testing.T) { - a := mapTest{map[int]int{ - 1: 2, - 3: 4, - }} - aa := mapTest{map[int]int{ - 1: 2, - 3: 4, - }} - b := mapTest{ - map[int]int{}, - } - if err := Merge(&a, b); err != nil { - t.Fail() - } - if !reflect.DeepEqual(a, aa) { - t.FailNow() - } -} - -func TestMapsWithOverwrite(t *testing.T) { - m := map[string]simpleTest{ - "a": {}, // overwritten by 16 - "b": {42}, // not overwritten by empty value - "c": {13}, // overwritten by 12 - "d": {61}, - } - n := map[string]simpleTest{ - "a": {16}, - "b": {}, - "c": {12}, - "e": {14}, - } - expect := map[string]simpleTest{ - "a": {16}, - "b": {}, - "c": {12}, - "d": {61}, - "e": {14}, - } - - if err := MergeWithOverwrite(&m, n); err != nil { - t.Fatalf(err.Error()) - } - - if !reflect.DeepEqual(m, expect) { - t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) - } -} - -func TestMaps(t *testing.T) { - m := map[string]simpleTest{ - "a": {}, - "b": {42}, - "c": {13}, - "d": {61}, - } - n := map[string]simpleTest{ - "a": {16}, - "b": {}, - "c": {12}, - "e": {14}, - } - expect := map[string]simpleTest{ - "a": {0}, - "b": {42}, - "c": {13}, - "d": {61}, - "e": {14}, - } - - if err := Merge(&m, n); err != nil { - t.Fatalf(err.Error()) - } - - if !reflect.DeepEqual(m, expect) { - t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) - } - if m["a"].Value != 0 { - t.Fatalf(`n merged in m because I solved non-addressable map values TODO: m["a"].Value(%d) != n["a"].Value(%d)`, m["a"].Value, n["a"].Value) - } - if m["b"].Value != 42 { - t.Fatalf(`n wrongly merged in m: m["b"].Value(%d) != n["b"].Value(%d)`, m["b"].Value, n["b"].Value) - } - if m["c"].Value != 13 { - t.Fatalf(`n overwritten in m: m["c"].Value(%d) != 
n["c"].Value(%d)`, m["c"].Value, n["c"].Value) - } -} - -func TestYAMLMaps(t *testing.T) { - thing := loadYAML("testdata/thing.yml") - license := loadYAML("testdata/license.yml") - ft := thing["fields"].(map[interface{}]interface{}) - fl := license["fields"].(map[interface{}]interface{}) - // license has one extra field (site) and another already existing in thing (author) that Mergo won't override. - expectedLength := len(ft) + len(fl) - 1 - if err := Merge(&license, thing); err != nil { - t.Fatal(err.Error()) - } - currentLength := len(license["fields"].(map[interface{}]interface{})) - if currentLength != expectedLength { - t.Fatalf(`thing not merged in license properly, license must have %d elements instead of %d`, expectedLength, currentLength) - } - fields := license["fields"].(map[interface{}]interface{}) - if _, ok := fields["id"]; !ok { - t.Fatalf(`thing not merged in license properly, license must have a new id field from thing`) - } -} - -func TestTwoPointerValues(t *testing.T) { - a := &simpleTest{} - b := &simpleTest{42} - if err := Merge(a, b); err != nil { - t.Fatalf(`Boom. You crossed the streams: %s`, err) - } -} - -func TestMap(t *testing.T) { - a := complexTest{} - a.ID = "athing" - c := moreComplextText{a, simpleTest{}, simpleTest{}} - b := map[string]interface{}{ - "ct": map[string]interface{}{ - "st": map[string]interface{}{ - "value": 42, - }, - "sz": 1, - "id": "bthing", - }, - "st": &simpleTest{144}, // Mapping a reference - "zt": simpleTest{299}, // Mapping a missing field (zt doesn't exist) - "nt": simpleTest{3}, - } - if err := Map(&c, b); err != nil { - t.FailNow() - } - m := b["ct"].(map[string]interface{}) - n := m["st"].(map[string]interface{}) - o := b["st"].(*simpleTest) - p := b["nt"].(simpleTest) - if c.Ct.St.Value != 42 { - t.Fatalf("b not merged in properly: c.Ct.St.Value(%d) != b.Ct.St.Value(%d)", c.Ct.St.Value, n["value"]) - } - if c.St.Value != 144 { - t.Fatalf("b not merged in properly: c.St.Value(%d) != b.St.Value(%d)", c.St.Value, o.Value) - } - if c.Nt.Value != 3 { - t.Fatalf("b not merged in properly: c.Nt.Value(%d) != b.Nt.Value(%d)", c.St.Value, p.Value) - } - if c.Ct.sz == 1 { - t.Fatalf("a's private field sz not preserved from merge: c.Ct.sz(%d) == b.Ct.sz(%d)", c.Ct.sz, m["sz"]) - } - if c.Ct.ID == m["id"] { - t.Fatalf("a's field ID merged unexpectedly: c.Ct.ID(%s) == b.Ct.ID(%s)", c.Ct.ID, m["id"]) - } -} - -func TestSimpleMap(t *testing.T) { - a := simpleTest{} - b := map[string]interface{}{ - "value": 42, - } - if err := Map(&a, b); err != nil { - t.FailNow() - } - if a.Value != 42 { - t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%v)", a.Value, b["value"]) - } -} - -func TestIfcMap(t *testing.T) { - a := ifcTest{} - b := ifcTest{42} - if err := Map(&a, b); err != nil { - t.FailNow() - } - if a.I != 42 { - t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I) - } - if !reflect.DeepEqual(a, b) { - t.FailNow() - } -} - -func TestIfcMapNoOverwrite(t *testing.T) { - a := ifcTest{13} - b := ifcTest{42} - if err := Map(&a, b); err != nil { - t.FailNow() - } - if a.I != 13 { - t.Fatalf("a not left alone: a.I(%d) == b.I(%d)", a.I, b.I) - } -} - -func TestIfcMapWithOverwrite(t *testing.T) { - a := ifcTest{13} - b := ifcTest{42} - if err := MapWithOverwrite(&a, b); err != nil { - t.FailNow() - } - if a.I != 42 { - t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I) - } - if !reflect.DeepEqual(a, b) { - t.FailNow() - } -} - -type pointerMapTest struct { - A int - hidden int - B *simpleTest -} - -func 
TestBackAndForth(t *testing.T) { - pt := pointerMapTest{42, 1, &simpleTest{66}} - m := make(map[string]interface{}) - if err := Map(&m, pt); err != nil { - t.FailNow() - } - var ( - v interface{} - ok bool - ) - if v, ok = m["a"]; v.(int) != pt.A || !ok { - t.Fatalf("pt not merged in properly: m[`a`](%d) != pt.A(%d)", v, pt.A) - } - if v, ok = m["b"]; !ok { - t.Fatalf("pt not merged in properly: B is missing in m") - } - var st *simpleTest - if st = v.(*simpleTest); st.Value != 66 { - t.Fatalf("something went wrong while mapping pt on m, B wasn't copied") - } - bpt := pointerMapTest{} - if err := Map(&bpt, m); err != nil { - t.Fatal(err) - } - if bpt.A != pt.A { - t.Fatalf("pt not merged in properly: bpt.A(%d) != pt.A(%d)", bpt.A, pt.A) - } - if bpt.hidden == pt.hidden { - t.Fatalf("pt unexpectedly merged: bpt.hidden(%d) == pt.hidden(%d)", bpt.hidden, pt.hidden) - } - if bpt.B.Value != pt.B.Value { - t.Fatalf("pt not merged in properly: bpt.B.Value(%d) != pt.B.Value(%d)", bpt.B.Value, pt.B.Value) - } -} - -func TestEmbeddedPointerUnpacking(t *testing.T) { - tests := []struct{ input pointerMapTest }{ - {pointerMapTest{42, 1, nil}}, - {pointerMapTest{42, 1, &simpleTest{66}}}, - } - newValue := 77 - m := map[string]interface{}{ - "b": map[string]interface{}{ - "value": newValue, - }, - } - for _, test := range tests { - pt := test.input - if err := MapWithOverwrite(&pt, m); err != nil { - t.FailNow() - } - if pt.B.Value != newValue { - t.Fatalf("pt not mapped properly: pt.A.Value(%d) != m[`b`][`value`](%d)", pt.B.Value, newValue) - } - - } -} - -type structWithTimePointer struct { - Birth *time.Time -} - -func TestTime(t *testing.T) { - now := time.Now() - dataStruct := structWithTimePointer{ - Birth: &now, - } - dataMap := map[string]interface{}{ - "Birth": &now, - } - b := structWithTimePointer{} - if err := Merge(&b, dataStruct); err != nil { - t.FailNow() - } - if b.Birth.IsZero() { - t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) - } - if b.Birth != dataStruct.Birth { - t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) - } - b = structWithTimePointer{} - if err := Map(&b, dataMap); err != nil { - t.FailNow() - } - if b.Birth.IsZero() { - t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataMap['Birth'](%v)", b.Birth, dataMap["Birth"]) - } -} - -type simpleNested struct { - A int -} - -type structWithNestedPtrValueMap struct { - NestedPtrValue map[string]*simpleNested -} - -func TestNestedPtrValueInMap(t *testing.T) { - src := &structWithNestedPtrValueMap{ - NestedPtrValue: map[string]*simpleNested{ - "x": { - A: 1, - }, - }, - } - dst := &structWithNestedPtrValueMap{ - NestedPtrValue: map[string]*simpleNested{ - "x": {}, - }, - } - if err := Map(dst, src); err != nil { - t.FailNow() - } - if dst.NestedPtrValue["x"].A == 0 { - t.Fatalf("Nested Ptr value not merged in properly: dst.NestedPtrValue[\"x\"].A(%v) != src.NestedPtrValue[\"x\"].A(%v)", dst.NestedPtrValue["x"].A, src.NestedPtrValue["x"].A) - } -} - -func loadYAML(path string) (m map[string]interface{}) { - m = make(map[string]interface{}) - raw, _ := ioutil.ReadFile(path) - _ = yaml.Unmarshal(raw, &m) - return -} - -type structWithMap struct { - m map[string]structWithUnexportedProperty -} - -type structWithUnexportedProperty struct { - s string -} - -func TestUnexportedProperty(t *testing.T) { - a := structWithMap{map[string]structWithUnexportedProperty{ - "key": 
structWithUnexportedProperty{"hello"}, - }} - b := structWithMap{map[string]structWithUnexportedProperty{ - "key": structWithUnexportedProperty{"hi"}, - }} - defer func() { - if r := recover(); r != nil { - t.Errorf("Should not have panicked") - } - }() - Merge(&a, b) -} - -type structWithBoolPointer struct { - C *bool -} - -func TestBooleanPointer(t *testing.T) { - bt, bf := true, false - src := structWithBoolPointer{ - &bt, - } - dst := structWithBoolPointer{ - &bf, - } - if err := Merge(&dst, src); err != nil { - t.FailNow() - } - if dst.C == src.C { - t.Fatalf("dst.C should be a different pointer than src.C") - } - if *dst.C != *src.C { - t.Fatalf("dst.C should be true") - } -} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 0000000000..5f0d1fb6a7 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 0000000000..7a950d1774 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 0000000000..9d2d8a4bab --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,15 @@ +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. 
+func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 0000000000..336142a5e3 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,98 @@ +// +build windows +// +build !go1.4 + +package mousetrap + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const ( + // defined by the Win32 API + th32cs_snapprocess uintptr = 0x2 +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") + Process32First = kernel.MustFindProc("Process32FirstW") + Process32Next = kernel.MustFindProc("Process32NextW") +) + +// ProcessEntry32 structure defined by the Win32 API +type processEntry32 struct { + dwSize uint32 + cntUsage uint32 + th32ProcessID uint32 + th32DefaultHeapID int + th32ModuleID uint32 + cntThreads uint32 + th32ParentProcessID uint32 + pcPriClassBase int32 + dwFlags uint32 + szExeFile [syscall.MAX_PATH]uint16 +} + +func getProcessEntry(pid int) (pe *processEntry32, err error) { + snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) + if snapshot == uintptr(syscall.InvalidHandle) { + err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) + return + } + defer syscall.CloseHandle(syscall.Handle(snapshot)) + + var processEntry processEntry32 + processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) + ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32First: %v", e1) + return + } + + for { + if processEntry.th32ProcessID == uint32(pid) { + pe = &processEntry + return + } + + ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32Next: %v", e1) + return + } + } +} + +func getppid() (pid int, err error) { + pe, err := getProcessEntry(os.Getpid()) + if err != nil { + return + } + + pid = int(pe.th32ParentProcessID) + return +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. 
It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + ppid, err := getppid() + if err != nil { + return false + } + + pe, err := getProcessEntry(ppid) + if err != nil { + return false + } + + name := syscall.UTF16ToString(pe.szExeFile[:]) + return name == "explorer.exe" +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go new file mode 100644 index 0000000000..9a28e57c3c --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -0,0 +1,46 @@ +// +build windows +// +build go1.4 + +package mousetrap + +import ( + "os" + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(os.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore index 501fcdc9a6..15556530a8 100644 --- a/vendor/github.com/json-iterator/go/.gitignore +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -1,3 +1,4 @@ -.idea +/vendor +/bug_test.go /coverage.txt -/profile.out +/.idea diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 0000000000..c8a9fbb387 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 0000000000..313a0f887b --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
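The mousetrap files above compile down to a single cross-platform entry point, `StartedByExplorer()`. A minimal sketch of how a CLI might consume it; the `main` package, the message text, and the pause-for-Enter behavior are illustrative assumptions, not part of the vendored library:

```go
package main

import (
	"fmt"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		// Hypothetical double-click handling: print guidance and block on
		// Enter so the console window does not close before it can be read.
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		fmt.Scanln()
		return
	}
	// ... normal command-line flow ...
}
```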
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 3a0d680983..50d56ffbf0 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,9 +8,7 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -``` -Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com -``` +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) # Benchmark @@ -29,6 +27,9 @@ Raw Result (easyjson requires static code generation) | easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | | jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +Always benchmark with your own workload. +The result depends heavily on the data input. + # Usage 100% compatibility with standard lib diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 0000000000..e674d0f397 --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. 
+// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. 
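The adapter types above mirror `encoding/json`, so `NewDecoder`/`NewEncoder` call sites can switch packages without other changes. A hedged sketch of the streaming loop built from the `More` and `Decode` methods defined above; the input literal and variable names are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// More returns true while the next token is neither ']' nor '}' nor EOF,
	// so a stream of top-level JSON values can be drained value by value.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"a":1} {"a":2}`))
	for dec.More() {
		var v map[string]int
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v["a"]) // prints 1, then 2
	}
}
```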
+func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 0000000000..f6b8aeab0a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. +type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return 
&trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. +func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/any_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_array.go rename to vendor/github.com/json-iterator/go/any_array.go diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_bool.go rename to vendor/github.com/json-iterator/go/any_bool.go diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/any_float.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_float.go rename to vendor/github.com/json-iterator/go/any_float.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int32.go rename to vendor/github.com/json-iterator/go/any_int32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int64.go rename to vendor/github.com/json-iterator/go/any_int64.go diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_invalid.go rename to vendor/github.com/json-iterator/go/any_invalid.go diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go similarity index 100% rename 
from vendor/github.com/json-iterator/go/feature_any_nil.go rename to vendor/github.com/json-iterator/go/any_nil.go diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 0000000000..9d1e901a66 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/any_object.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_object.go rename to vendor/github.com/json-iterator/go/any_object.go diff --git a/vendor/github.com/json-iterator/go/any_str.go 
b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 0000000000..a4b93c78c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint32.go rename to vendor/github.com/json-iterator/go/any_uint32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint64.go rename to vendor/github.com/json-iterator/go/any_uint64.go diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 0000000000..b45ef68831 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 0000000000..8c58fcba59 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. 
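`Config.Froze()` below is the pivot of this file: it freezes a value-typed `Config` into an immutable `API` that carries its own encoder/decoder caches. A hedged usage sketch restricted to names visible in this diff; the sorted output shown is implied by `SortMapKeys`, not captured from a run:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Freeze a custom configuration once and reuse the resulting API;
	// the frozen config owns per-type encoder and decoder caches.
	api := jsoniter.Config{SortMapKeys: true, EscapeHTML: true}.Froze()

	out, err := api.Marshal(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2} since SortMapKeys is set

	// Path-based access goes through locatePath (defined earlier in any.go).
	fmt.Println(jsoniter.Get(out, "b").ToInt()) // 2
}
```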
+type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if 
cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. 
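The lossy encoders registered below are what `ConfigFastest` (declared earlier in this file) opts into: roughly six fractional digits in exchange for speed. A sketch of the trade-off; the rounded literal is inferred from the `10**(-6)` comment and the `WriteFloat32Lossy`/`WriteFloat64Lossy` names, not captured output:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	const pi = 3.14159265358979

	fast, _ := jsoniter.ConfigFastest.MarshalToString(pi) // lossy floats
	full, _ := jsoniter.ConfigDefault.MarshalToString(pi) // full precision

	fmt.Println(fast) // expected "3.141593" under 10**(-6) precision
	fmt.Println(full) // expected "3.14159265358979"
}
```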
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git 
a/vendor/github.com/json-iterator/go/example_test.go b/vendor/github.com/json-iterator/go/example_test.go deleted file mode 100644 index 1c8f341c14..0000000000 --- a/vendor/github.com/json-iterator/go/example_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package jsoniter - -import ( - "fmt" - "os" -) - -func ExampleMarshal() { - type ColorGroup struct { - ID int - Name string - Colors []string - } - group := ColorGroup{ - ID: 1, - Name: "Reds", - Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, - } - b, err := Marshal(group) - if err != nil { - fmt.Println("error:", err) - } - os.Stdout.Write(b) - // Output: - // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} -} - -func ExampleUnmarshal() { - var jsonBlob = []byte(`[ - {"Name": "Platypus", "Order": "Monotremata"}, - {"Name": "Quoll", "Order": "Dasyuromorphia"} - ]`) - type Animal struct { - Name string - Order string - } - var animals []Animal - err := Unmarshal(jsonBlob, &animals) - if err != nil { - fmt.Println("error:", err) - } - fmt.Printf("%+v", animals) - // Output: - // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}] -} - -func ExampleConfigFastest_Marshal() { - type ColorGroup struct { - ID int - Name string - Colors []string - } - group := ColorGroup{ - ID: 1, - Name: "Reds", - Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, - } - stream := ConfigFastest.BorrowStream(nil) - defer ConfigFastest.ReturnStream(stream) - stream.WriteVal(group) - if stream.Error != nil { - fmt.Println("error:", stream.Error) - } - os.Stdout.Write(stream.Buffer()) - // Output: - // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} -} - -func ExampleConfigFastest_Unmarshal() { - var jsonBlob = []byte(`[ - {"Name": "Platypus", "Order": "Monotremata"}, - {"Name": "Quoll", "Order": "Dasyuromorphia"} - ]`) - type Animal struct { - Name string - Order string - } - var animals []Animal - iter := ConfigFastest.BorrowIterator(jsonBlob) - defer ConfigFastest.ReturnIterator(iter) - iter.ReadVal(&animals) - if iter.Error != nil { - fmt.Println("error:", iter.Error) - } - fmt.Printf("%+v", animals) - // Output: - // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}] -} - -func ExampleGet() { - val := []byte(`{"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}`) - fmt.Printf(Get(val, "Colors", 0).ToString()) - // Output: - // Crimson -} diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go deleted file mode 100644 index 40a701ab6b..0000000000 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ /dev/null @@ -1,132 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. 
-// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -func lastNotSpacePos(data []byte) int { - for i := len(data) - 1; i >= 0; i-- { - if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { - return i + 1 - } - } - return 0 -} - -// UnmarshalFromString convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? -func (adapter *Decoder) More() bool { - return adapter.iter.head != adapter.iter.tail -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) -func (adapter *Decoder) UseNumber() { - origCfg := adapter.iter.cfg.configBeforeFrozen - origCfg.UseNumber = true - adapter.iter.cfg = origCfg.Froze().(*frozenConfig) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. 
Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - adapter.stream.cfg.indentionStep = len(indent) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.Froze().(*frozenConfig) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go deleted file mode 100644 index 6733dce4cc..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ /dev/null @@ -1,242 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. -type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - // TODO: add Set - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - return WrapUint64(uint64(val.(uint))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return 
WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. -func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go deleted file mode 100644 index 4e1c27641d..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any_number.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import "unsafe" - -type numberLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go deleted file mode 100644 index abf060bd59..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any_string.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) 
diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go
deleted file mode 100644
index 4e1c27641d..0000000000
--- a/vendor/github.com/json-iterator/go/feature_any_number.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jsoniter
-
-import "unsafe"
-
-type numberLazyAny struct {
-	baseAny
-	cfg *frozenConfig
-	buf []byte
-	err error
-}
-
-func (any *numberLazyAny) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *numberLazyAny) MustBeValid() Any {
-	return any
-}
-
-func (any *numberLazyAny) LastError() error {
-	return any.err
-}
-
-func (any *numberLazyAny) ToBool() bool {
-	return any.ToFloat64() != 0
-}
-
-func (any *numberLazyAny) ToInt() int {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToInt32() int32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt32()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToInt64() int64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt64()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToUint() uint {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToUint32() uint32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint32()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToUint64() uint64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint64()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToFloat32() float32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadFloat32()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToFloat64() float64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadFloat64()
-	any.err = iter.Error
-	return val
-}
-
-func (any *numberLazyAny) ToString() string {
-	return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *numberLazyAny) WriteTo(stream *Stream) {
-	stream.Write(any.buf)
-}
-
-func (any *numberLazyAny) GetInterface() interface{} {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	return iter.Read()
-}
diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go
deleted file mode 100644
index abf060bd59..0000000000
--- a/vendor/github.com/json-iterator/go/feature_any_string.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"strconv"
-)
-
-type stringAny struct {
-	baseAny
-	val string
-}
-
-func (any *stringAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
-}
-
-func (any *stringAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *stringAny) ValueType() ValueType {
-	return StringValue
-}
-
-func (any *stringAny) MustBeValid() Any {
-	return any
-}
-
-func (any *stringAny) LastError() error {
-	return nil
-}
-
-func (any *stringAny) ToBool() bool {
-	str := any.ToString()
-	if str == "0" {
-		return false
-	}
-	for _, c := range str {
-		switch c {
-		case ' ', '\n', '\r', '\t':
-		default:
-			return true
-		}
-	}
-	return false
-}
-
-func (any *stringAny) ToInt() int {
-	return int(any.ToInt64())
-
-}
-
-func (any *stringAny) ToInt32() int32 {
-	return int32(any.ToInt64())
-}
-
-func (any *stringAny) ToInt64() int64 {
-	if any.val == "" {
-		return 0
-	}
-
-	flag := 1
-	startPos := 0
-	endPos := 0
-	if any.val[0] == '+' || any.val[0] == '-' {
-		startPos = 1
-	}
-
-	if any.val[0] == '-' {
-		flag = -1
-	}
-
-	for i := startPos; i < len(any.val); i++ {
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			break
-		}
-	}
-	parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
-	return int64(flag) * parsed
-}
-
-func (any *stringAny) ToUint() uint {
-	return uint(any.ToUint64())
-}
-
-func (any *stringAny) ToUint32() uint32 {
-	return uint32(any.ToUint64())
-}
-
-func (any *stringAny) ToUint64() uint64 {
-	if any.val == "" {
-		return 0
-	}
-
-	startPos := 0
-	endPos := 0
-
-	if any.val[0] == '-' {
-		return 0
-	}
-	if any.val[0] == '+' {
-		startPos = 1
-	}
-
-	for i := startPos; i < len(any.val); i++ {
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			break
-		}
-	}
-	parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
-	return parsed
-}
-
-func (any *stringAny) ToFloat32() float32 {
-	return float32(any.ToFloat64())
-}
-
-func (any *stringAny) ToFloat64() float64 {
-	if len(any.val) == 0 {
-		return 0
-	}
-
-	// first char invalid
-	if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
-		return 0
-	}
-
-	// extract valid num expression from string
-	// eg 123true => 123, -12.12xxa => -12.12
-	endPos := 1
-	for i := 1; i < len(any.val); i++ {
-		if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
-			endPos = i + 1
-			continue
-		}
-
-		// end position is the first char which is not digit
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			endPos = i
-			break
-		}
-	}
-	parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
-	return parsed
-}
-
-func (any *stringAny) ToString() string {
-	return any.val
-}
-
-func (any *stringAny) WriteTo(stream *Stream) {
-	stream.WriteString(any.val)
-}
-
-func (any *stringAny) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go
deleted file mode 100644
index 687210820b..0000000000
--- a/vendor/github.com/json-iterator/go/feature_config.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"errors"
-	"io"
-	"reflect"
-	"sync/atomic"
-	"unsafe"
-)
-
-// Config customizes how the API should behave.
-// The API is created from Config by Froze.
-type Config struct {
-	IndentionStep           int
-	MarshalFloatWith6Digits bool
-	EscapeHTML              bool
-	SortMapKeys             bool
-	UseNumber               bool
-	TagKey                  string
-	ValidateJsonRawMessage  bool
-}
-
-type frozenConfig struct {
-	configBeforeFrozen Config
-	sortMapKeys        bool
-	indentionStep      int
-	decoderCache       unsafe.Pointer
-	encoderCache       unsafe.Pointer
-	extensions         []Extension
-	streamPool         chan *Stream
-	iteratorPool       chan *Iterator
-}
-
-// API is the public interface of this package.
-// Primarily Marshal and Unmarshal.
-type API interface {
-	IteratorPool
-	StreamPool
-	MarshalToString(v interface{}) (string, error)
-	Marshal(v interface{}) ([]byte, error)
-	MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
-	UnmarshalFromString(str string, v interface{}) error
-	Unmarshal(data []byte, v interface{}) error
-	Get(data []byte, path ...interface{}) Any
-	NewEncoder(writer io.Writer) *Encoder
-	NewDecoder(reader io.Reader) *Decoder
-	Valid(data []byte) bool
-}
-
-// ConfigDefault the default API
-var ConfigDefault = Config{
-	EscapeHTML: true,
-}.Froze()
-
-// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
-var ConfigCompatibleWithStandardLibrary = Config{
-	EscapeHTML:             true,
-	SortMapKeys:            true,
-	ValidateJsonRawMessage: true,
-}.Froze()
-
-// ConfigFastest marshals float with only 6 digits precision
-var ConfigFastest = Config{
-	EscapeHTML:              false,
-	MarshalFloatWith6Digits: true,
-}.Froze()
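// [Editor's illustrative sketch — not part of the deleted vendor file. A Config is
// frozen once into an immutable API value that callers reuse; assumes the usual
// jsoniter import path.]
func exampleConfigs() {
	// Drop-in replacement for encoding/json semantics.
	out, _ := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(map[string]int{"a": 1})
	// A custom Config is frozen once and the resulting API reused everywhere.
	api := jsoniter.Config{SortMapKeys: true, IndentionStep: 2}.Froze()
	var in map[string]int
	_ = api.Unmarshal(out, &in)
}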
-// Froze forges an API from a Config.
-func (cfg Config) Froze() API {
-	// TODO: cache frozen config
-	frozenConfig := &frozenConfig{
-		sortMapKeys:   cfg.SortMapKeys,
-		indentionStep: cfg.IndentionStep,
-		streamPool:    make(chan *Stream, 16),
-		iteratorPool:  make(chan *Iterator, 16),
-	}
-	atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{}))
-	atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{}))
-	if cfg.MarshalFloatWith6Digits {
-		frozenConfig.marshalFloatWith6Digits()
-	}
-	if cfg.EscapeHTML {
-		frozenConfig.escapeHTML()
-	}
-	if cfg.UseNumber {
-		frozenConfig.useNumber()
-	}
-	if cfg.ValidateJsonRawMessage {
-		frozenConfig.validateJsonRawMessage()
-	}
-	frozenConfig.configBeforeFrozen = cfg
-	return frozenConfig
-}
-
-func (cfg *frozenConfig) validateJsonRawMessage() {
-	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
-		rawMessage := *(*json.RawMessage)(ptr)
-		iter := cfg.BorrowIterator([]byte(rawMessage))
-		iter.Read()
-		if iter.Error != nil {
-			stream.WriteRaw("null")
-		} else {
-			cfg.ReturnIterator(iter)
-			stream.WriteRaw(string(rawMessage))
-		}
-	}, func(ptr unsafe.Pointer) bool {
-		return false
-	}}
-	cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder)
-	cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder)
-}
-
-func (cfg *frozenConfig) useNumber() {
-	cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
-		if iter.WhatIsNext() == NumberValue {
-			*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
-		} else {
-			*((*interface{})(ptr)) = iter.Read()
-		}
-	}})
-}
-func (cfg *frozenConfig) getTagKey() string {
-	tagKey := cfg.configBeforeFrozen.TagKey
-	if tagKey == "" {
-		return "json"
-	}
-	return tagKey
-}
-
-func (cfg *frozenConfig) registerExtension(extension Extension) {
-	cfg.extensions = append(cfg.extensions, extension)
-}
-
-type lossyFloat32Encoder struct {
-}
-
-func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat32Lossy(*((*float32)(ptr)))
-}
-
-func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) {
-	WriteToStream(val, stream, encoder)
-}
-
-func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float32)(ptr)) == 0
-}
-
-type lossyFloat64Encoder struct {
-}
-
-func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat64Lossy(*((*float64)(ptr)))
-}
-
-func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) {
-	WriteToStream(val, stream, encoder)
-}
-
-func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float64)(ptr)) == 0
-}
-
-// marshalFloatWith6Digits keeps 10**(-6) precision
-// for float variables, for better performance.
-func (cfg *frozenConfig) marshalFloatWith6Digits() {
-	// for better performance
-	cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{})
-	cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{})
-}
-
-type htmlEscapedStringEncoder struct {
-}
-
-func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	str := *((*string)(ptr))
-	stream.WriteStringWithHTMLEscaped(str)
-}
-
-func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
-	WriteToStream(val, stream, encoder)
-}
-
-func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*string)(ptr)) == ""
-}
-
-func (cfg *frozenConfig) escapeHTML() {
-	cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{})
-}
-
-func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) {
-	done := false
-	for !done {
-		ptr := atomic.LoadPointer(&cfg.decoderCache)
-		cache := *(*map[reflect.Type]ValDecoder)(ptr)
-		copied := map[reflect.Type]ValDecoder{}
-		for k, v := range cache {
-			copied[k] = v
-		}
-		copied[cacheKey] = decoder
-		done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied))
-	}
-}
-
-func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) {
-	done := false
-	for !done {
-		ptr := atomic.LoadPointer(&cfg.encoderCache)
-		cache := *(*map[reflect.Type]ValEncoder)(ptr)
-		copied := map[reflect.Type]ValEncoder{}
-		for k, v := range cache {
-			copied[k] = v
-		}
-		copied[cacheKey] = encoder
-		done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied))
-	}
-}
-
-func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder {
-	ptr := atomic.LoadPointer(&cfg.decoderCache)
-	cache := *(*map[reflect.Type]ValDecoder)(ptr)
-	return cache[cacheKey]
-}
-
-func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder {
-	ptr := atomic.LoadPointer(&cfg.encoderCache)
-	cache := *(*map[reflect.Type]ValEncoder)(ptr)
-	return cache[cacheKey]
-}
-
-func (cfg *frozenConfig) cleanDecoders() {
-	typeDecoders = map[string]ValDecoder{}
-	fieldDecoders = map[string]ValDecoder{}
-	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) cleanEncoders() {
-	typeEncoders = map[string]ValEncoder{}
-	fieldEncoders = map[string]ValEncoder{}
-	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
-	stream := cfg.BorrowStream(nil)
-	defer cfg.ReturnStream(stream)
-	stream.WriteVal(v)
-	if stream.Error != nil {
-		return "", stream.Error
-	}
-	return string(stream.Buffer()), nil
-}
-
-func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
-	stream := cfg.BorrowStream(nil)
-	defer cfg.ReturnStream(stream)
-	stream.WriteVal(v)
-	if stream.Error != nil {
-		return nil, stream.Error
-	}
-	result := stream.Buffer()
-	copied := make([]byte, len(result))
-	copy(copied, result)
-	return copied, nil
-}
-
-func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	if prefix != "" {
-		panic("prefix is not supported")
-	}
-	for _, r := range indent {
-		if r != ' ' {
-			panic("indent can only be space")
-		}
-	}
-	newCfg := cfg.configBeforeFrozen
-	newCfg.IndentionStep = len(indent)
-	return newCfg.Froze().Marshal(v)
-}
-
-func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
-	data := []byte(str)
-	data = data[:lastNotSpacePos(data)]
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	iter.ReadVal(v)
-	if iter.head == iter.tail {
-		iter.loadMore()
-	}
-	if iter.Error == io.EOF {
-		return nil
-	}
-	if iter.Error == nil {
-		iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal")
-	}
-	return iter.Error
-}
-
-func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	return locatePath(iter, path)
-}
-
-func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
-	data = data[:lastNotSpacePos(data)]
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	typ := reflect.TypeOf(v)
-	if typ.Kind() != reflect.Ptr {
-		// return non-pointer error
-		return errors.New("the second param must be ptr type")
-	}
-	iter.ReadVal(v)
-	if iter.head == iter.tail {
-		iter.loadMore()
-	}
-	if iter.Error == io.EOF {
-		return nil
-	}
-	if iter.Error == nil {
-		iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
-	}
-	return iter.Error
-}
-
-func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
-	stream := NewStream(cfg, writer, 512)
-	return &Encoder{stream}
-}
-
-func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
-	iter := Parse(cfg, reader, 512)
-	return &Decoder{iter}
-}
-
-func (cfg *frozenConfig) Valid(data []byte) bool {
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	iter.Skip()
-	return iter.Error == nil
-}
diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go
deleted file mode 100644
index 54a3a0fd9c..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-)
-
-// ValueType the type for JSON element
-type ValueType int
-
-const (
-	// InvalidValue invalid JSON element
-	InvalidValue ValueType = iota
-	// StringValue JSON element "string"
-	StringValue
-	// NumberValue JSON element 100 or 0.10
-	NumberValue
-	// NilValue JSON element null
-	NilValue
-	// BoolValue JSON element true or false
-	BoolValue
-	// ArrayValue JSON element []
-	ArrayValue
-	// ObjectValue JSON element {}
-	ObjectValue
-)
-
-var hexDigits []byte
-var valueTypes []ValueType
-
-func init() {
-	hexDigits = make([]byte, 256)
-	for i := 0; i < len(hexDigits); i++ {
-		hexDigits[i] = 255
-	}
-	for i := '0'; i <= '9'; i++ {
-		hexDigits[i] = byte(i - '0')
-	}
-	for i := 'a'; i <= 'f'; i++ {
-		hexDigits[i] = byte((i - 'a') + 10)
-	}
-	for i := 'A'; i <= 'F'; i++ {
-		hexDigits[i] = byte((i - 'A') + 10)
-	}
-	valueTypes = make([]ValueType, 256)
-	for i := 0; i < len(valueTypes); i++ {
-		valueTypes[i] = InvalidValue
-	}
-	valueTypes['"'] = StringValue
-	valueTypes['-'] = NumberValue
-	valueTypes['0'] = NumberValue
-	valueTypes['1'] = NumberValue
-	valueTypes['2'] = NumberValue
-	valueTypes['3'] = NumberValue
-	valueTypes['4'] = NumberValue
-	valueTypes['5'] = NumberValue
-	valueTypes['6'] = NumberValue
-	valueTypes['7'] = NumberValue
-	valueTypes['8'] = NumberValue
-	valueTypes['9'] = NumberValue
-	valueTypes['t'] = BoolValue
-	valueTypes['f'] = BoolValue
-	valueTypes['n'] = NilValue
-	valueTypes['['] = ArrayValue
-	valueTypes['{'] = ObjectValue
-}
-
-// Iterator is an io.Reader-like object, with JSON specific read functions.
-// Errors are not returned as return values, but stored in the Error member on this iterator instance.
-type Iterator struct {
-	cfg              *frozenConfig
-	reader           io.Reader
-	buf              []byte
-	head             int
-	tail             int
-	captureStartedAt int
-	captured         []byte
-	Error            error
-}
-
-// NewIterator creates an empty Iterator instance
-func NewIterator(cfg API) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: nil,
-		buf:    nil,
-		head:   0,
-		tail:   0,
-	}
-}
-
-// Parse creates an Iterator instance from io.Reader
-func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: reader,
-		buf:    make([]byte, bufSize),
-		head:   0,
-		tail:   0,
-	}
-}
-
-// ParseBytes creates an Iterator instance from byte array
-func ParseBytes(cfg API, input []byte) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: nil,
-		buf:    input,
-		head:   0,
-		tail:   len(input),
-	}
-}
-
-// ParseString creates an Iterator instance from string
-func ParseString(cfg API, input string) *Iterator {
-	return ParseBytes(cfg, []byte(input))
-}
-
-// Pool returns a pool that can provide more iterators with the same configuration
-func (iter *Iterator) Pool() IteratorPool {
-	return iter.cfg
-}
-
-// Reset reuses the iterator instance by specifying another reader
-func (iter *Iterator) Reset(reader io.Reader) *Iterator {
-	iter.reader = reader
-	iter.head = 0
-	iter.tail = 0
-	return iter
-}
-
-// ResetBytes reuses the iterator instance by specifying another byte array as input
-func (iter *Iterator) ResetBytes(input []byte) *Iterator {
-	iter.reader = nil
-	iter.buf = input
-	iter.head = 0
-	iter.tail = len(input)
-	return iter
-}
-
-// WhatIsNext gets the ValueType of the next JSON element without consuming it
-func (iter *Iterator) WhatIsNext() ValueType {
-	valueType := valueTypes[iter.nextToken()]
-	iter.unreadByte()
-	return valueType
-}
-
-func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		switch c {
-		case ' ', '\n', '\t', '\r':
-			continue
-		}
-		iter.head = i
-		return false
-	}
-	return true
-}
-
-func (iter *Iterator) isObjectEnd() bool {
-	c := iter.nextToken()
-	if c == ',' {
-		return false
-	}
-	if c == '}' {
-		return true
-	}
-	iter.ReportError("isObjectEnd", "object ended prematurely")
-	return true
-}
-
-func (iter *Iterator) nextToken() byte {
-	// a variation of skip whitespaces, returning the next non-whitespace token
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case ' ', '\n', '\t', '\r':
-				continue
-			}
-			iter.head = i + 1
-			return c
-		}
-		if !iter.loadMore() {
-			return 0
-		}
-	}
-}
-
-// ReportError records an error on the iterator instance, annotated with the current position.
-func (iter *Iterator) ReportError(operation string, msg string) {
-	if iter.Error != nil {
-		if iter.Error != io.EOF {
-			return
-		}
-	}
-	peekStart := iter.head - 10
-	if peekStart < 0 {
-		peekStart = 0
-	}
-	peekEnd := iter.head + 10
-	if peekEnd > iter.tail {
-		peekEnd = iter.tail
-	}
-	parsing := string(iter.buf[peekStart:peekEnd])
-	contextStart := iter.head - 50
-	if contextStart < 0 {
-		contextStart = 0
-	}
-	contextEnd := iter.head + 50
-	if contextEnd > iter.tail {
-		contextEnd = iter.tail
-	}
-	context := string(iter.buf[contextStart:contextEnd])
-	iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
-		operation, msg, iter.head-peekStart, parsing, context)
-}
-
-// CurrentBuffer gets the current buffer as a string for debugging purposes
-func (iter *Iterator) CurrentBuffer() string {
-	peekStart := iter.head - 10
-	if peekStart < 0 {
-		peekStart = 0
-	}
-	return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
-		string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
-}
-
-func (iter *Iterator) readByte() (ret byte) {
-	if iter.head == iter.tail {
-		if iter.loadMore() {
-			ret = iter.buf[iter.head]
-			iter.head++
-			return ret
-		}
-		return 0
-	}
-	ret = iter.buf[iter.head]
-	iter.head++
-	return ret
-}
-
-func (iter *Iterator) loadMore() bool {
-	if iter.reader == nil {
-		if iter.Error == nil {
-			iter.head = iter.tail
-			iter.Error = io.EOF
-		}
-		return false
-	}
-	if iter.captured != nil {
-		iter.captured = append(iter.captured,
-			iter.buf[iter.captureStartedAt:iter.tail]...)
-		iter.captureStartedAt = 0
-	}
-	for {
-		n, err := iter.reader.Read(iter.buf)
-		if n == 0 {
-			if err != nil {
-				if iter.Error == nil {
-					iter.Error = err
-				}
-				return false
-			}
-		} else {
-			iter.head = 0
-			iter.tail = n
-			return true
-		}
-	}
-}
-
-func (iter *Iterator) unreadByte() {
-	if iter.Error != nil {
-		return
-	}
-	iter.head--
-	return
-}
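// [Editor's illustrative sketch — not part of the deleted vendor file. Iterators never
// return errors directly; they accumulate on iter.Error, as ReportError above shows.
// Assumes imports of fmt, io and the jsoniter package.]
func exampleIterator() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"hello"`)
	if iter.WhatIsNext() == jsoniter.StringValue { // peeks without consuming input
		_ = iter.ReadString()
	}
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println(iter.Error) // includes the byte offset and surrounding context
	}
}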
-// Read reads the next JSON element as a generic interface{}.
-func (iter *Iterator) Read() interface{} {
-	valueType := iter.WhatIsNext()
-	switch valueType {
-	case StringValue:
-		return iter.ReadString()
-	case NumberValue:
-		if iter.cfg.configBeforeFrozen.UseNumber {
-			return json.Number(iter.readNumberAsString())
-		}
-		return iter.ReadFloat64()
-	case NilValue:
-		iter.skipFourBytes('n', 'u', 'l', 'l')
-		return nil
-	case BoolValue:
-		return iter.ReadBool()
-	case ArrayValue:
-		arr := []interface{}{}
-		iter.ReadArrayCB(func(iter *Iterator) bool {
-			var elem interface{}
-			iter.ReadVal(&elem)
-			arr = append(arr, elem)
-			return true
-		})
-		return arr
-	case ObjectValue:
-		obj := map[string]interface{}{}
-		iter.ReadMapCB(func(Iter *Iterator, field string) bool {
-			var elem interface{}
-			iter.ReadVal(&elem)
-			obj[field] = elem
-			return true
-		})
-		return obj
-	default:
-		iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
-		return nil
-	}
-}
diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go
deleted file mode 100644
index 86f4599122..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter_float.go
+++ /dev/null
@@ -1,341 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-	"math/big"
-	"strconv"
-	"strings"
-	"unsafe"
-)
-
-var floatDigits []int8
-
-const invalidCharForNumber = int8(-1)
-const endOfNumber = int8(-2)
-const dotInNumber = int8(-3)
-
-func init() {
-	floatDigits = make([]int8, 256)
-	for i := 0; i < len(floatDigits); i++ {
-		floatDigits[i] = invalidCharForNumber
-	}
-	for i := int8('0'); i <= int8('9'); i++ {
-		floatDigits[i] = i - int8('0')
-	}
-	floatDigits[','] = endOfNumber
-	floatDigits[']'] = endOfNumber
-	floatDigits['}'] = endOfNumber
-	floatDigits[' '] = endOfNumber
-	floatDigits['\t'] = endOfNumber
-	floatDigits['\n'] = endOfNumber
-	floatDigits['.'] = dotInNumber
-}
-
-// ReadBigFloat read big.Float
-func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return nil
-	}
-	prec := 64
-	if len(str) > prec {
-		prec = len(str)
-	}
-	val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
-	if err != nil {
-		iter.Error = err
-		return nil
-	}
-	return val
-}
-
-// ReadBigInt read big.Int
-func (iter *Iterator) ReadBigInt() (ret *big.Int) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return nil
-	}
-	ret = big.NewInt(0)
-	var success bool
-	ret, success = ret.SetString(str, 10)
-	if !success {
-		iter.ReportError("ReadBigInt", "invalid big int")
-		return nil
-	}
-	return ret
-}
-
-// ReadFloat32 read float32
-func (iter *Iterator) ReadFloat32() (ret float32) {
-	c := iter.nextToken()
-	if c == '-' {
-		return -iter.readPositiveFloat32()
-	}
-	iter.unreadByte()
-	return iter.readPositiveFloat32()
-}
-
-func (iter *Iterator) readPositiveFloat32() (ret float32) {
-	value := uint64(0)
-	c := byte(' ')
-	i := iter.head
-	// first char
-	if i == iter.tail {
-		return iter.readFloat32SlowPath()
-	}
-	c = iter.buf[i]
-	i++
-	ind := floatDigits[c]
-	switch ind {
-	case invalidCharForNumber:
-		return iter.readFloat32SlowPath()
-	case endOfNumber:
-		iter.ReportError("readFloat32", "empty number")
-		return
-	case dotInNumber:
-		iter.ReportError("readFloat32", "leading dot is invalid")
-		return
-	case 0:
-		if i == iter.tail {
-			return iter.readFloat32SlowPath()
-		}
-		c = iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			iter.ReportError("readFloat32", "leading zero is invalid")
-			return
-		}
-	}
-	value = uint64(ind)
-	// chars before dot
-non_decimal_loop:
-	for ; i < iter.tail; i++ {
-		c = iter.buf[i]
-		ind := floatDigits[c]
-		switch ind {
-		case invalidCharForNumber:
-			return iter.readFloat32SlowPath()
-		case endOfNumber:
-			iter.head = i
-			return float32(value)
-		case dotInNumber:
-			break non_decimal_loop
-		}
-		if value > uint64SafeToMultiple10 {
-			return iter.readFloat32SlowPath()
-		}
-		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
-	}
-	// chars after dot
-	if c == '.' {
-		i++
-		decimalPlaces := 0
-		if i == iter.tail {
-			return iter.readFloat32SlowPath()
-		}
-		for ; i < iter.tail; i++ {
-			c = iter.buf[i]
-			ind := floatDigits[c]
-			switch ind {
-			case endOfNumber:
-				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
-					iter.head = i
-					return float32(float64(value) / float64(pow10[decimalPlaces]))
-				}
-				// too many decimal places
-				return iter.readFloat32SlowPath()
-			case invalidCharForNumber:
-				fallthrough
-			case dotInNumber:
-				return iter.readFloat32SlowPath()
-			}
-			decimalPlaces++
-			if value > uint64SafeToMultiple10 {
-				return iter.readFloat32SlowPath()
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-	}
-	return iter.readFloat32SlowPath()
-}
-
-func (iter *Iterator) readNumberAsString() (ret string) {
-	strBuf := [16]byte{}
-	str := strBuf[0:0]
-load_loop:
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-				str = append(str, c)
-				continue
-			default:
-				iter.head = i
-				break load_loop
-			}
-		}
-		if !iter.loadMore() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	if len(str) == 0 {
-		iter.ReportError("readNumberAsString", "invalid number")
-	}
-	return *(*string)(unsafe.Pointer(&str))
-}
-
-func (iter *Iterator) readFloat32SlowPath() (ret float32) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	errMsg := validateFloat(str)
-	if errMsg != "" {
-		iter.ReportError("readFloat32SlowPath", errMsg)
-		return
-	}
-	val, err := strconv.ParseFloat(str, 32)
-	if err != nil {
-		iter.Error = err
-		return
-	}
-	return float32(val)
-}
-
-// ReadFloat64 read float64
-func (iter *Iterator) ReadFloat64() (ret float64) {
-	c := iter.nextToken()
-	if c == '-' {
-		return -iter.readPositiveFloat64()
-	}
-	iter.unreadByte()
-	return iter.readPositiveFloat64()
-}
-
-func (iter *Iterator) readPositiveFloat64() (ret float64) {
-	value := uint64(0)
-	c := byte(' ')
-	i := iter.head
-	// first char
-	if i == iter.tail {
-		return iter.readFloat64SlowPath()
-	}
-	c = iter.buf[i]
-	i++
-	ind := floatDigits[c]
-	switch ind {
-	case invalidCharForNumber:
-		return iter.readFloat64SlowPath()
-	case endOfNumber:
-		iter.ReportError("readFloat64", "empty number")
-		return
-	case dotInNumber:
-		iter.ReportError("readFloat64", "leading dot is invalid")
-		return
-	case 0:
-		if i == iter.tail {
-			return iter.readFloat64SlowPath()
-		}
-		c = iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			iter.ReportError("readFloat64", "leading zero is invalid")
-			return
-		}
-	}
-	value = uint64(ind)
-	// chars before dot
-non_decimal_loop:
-	for ; i < iter.tail; i++ {
-		c = iter.buf[i]
-		ind := floatDigits[c]
-		switch ind {
-		case invalidCharForNumber:
-			return iter.readFloat64SlowPath()
-		case endOfNumber:
-			iter.head = i
-			return float64(value)
-		case dotInNumber:
-			break non_decimal_loop
-		}
-		if value > uint64SafeToMultiple10 {
-			return iter.readFloat64SlowPath()
-		}
-		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
-	}
-	// chars after dot
-	if c == '.' {
-		i++
-		decimalPlaces := 0
-		if i == iter.tail {
-			return iter.readFloat64SlowPath()
-		}
-		for ; i < iter.tail; i++ {
-			c = iter.buf[i]
-			ind := floatDigits[c]
-			switch ind {
-			case endOfNumber:
-				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
-					iter.head = i
-					return float64(value) / float64(pow10[decimalPlaces])
-				}
-				// too many decimal places
-				return iter.readFloat64SlowPath()
-			case invalidCharForNumber:
-				fallthrough
-			case dotInNumber:
-				return iter.readFloat64SlowPath()
-			}
-			decimalPlaces++
-			if value > uint64SafeToMultiple10 {
-				return iter.readFloat64SlowPath()
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-	}
-	return iter.readFloat64SlowPath()
-}
-
-func (iter *Iterator) readFloat64SlowPath() (ret float64) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	errMsg := validateFloat(str)
-	if errMsg != "" {
-		iter.ReportError("readFloat64SlowPath", errMsg)
-		return
-	}
-	val, err := strconv.ParseFloat(str, 64)
-	if err != nil {
-		iter.Error = err
-		return
-	}
-	return val
-}
-
-func validateFloat(str string) string {
-	// strconv.ParseFloat is not validating `1.` or `1.e1`
-	if len(str) == 0 {
-		return "empty number"
-	}
-	if str[0] == '-' {
-		return "-- is not valid"
-	}
-	dotPos := strings.IndexByte(str, '.')
-	if dotPos != -1 {
-		if dotPos == len(str)-1 {
-			return "dot can not be last character"
-		}
-		switch str[dotPos+1] {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		default:
-			return "missing digit after dot"
-		}
-	}
-	return ""
-}
diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go
deleted file mode 100644
index 886879efdb..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter_int.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package jsoniter
-
-import (
-	"math"
-	"strconv"
-)
-
-var intDigits []int8
-
-const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
-const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
-
-func init() {
-	intDigits = make([]int8, 256)
-	for i := 0; i < len(intDigits); i++ {
-		intDigits[i] = invalidCharForNumber
-	}
-	for i := int8('0'); i <= int8('9'); i++ {
-		intDigits[i] = i - int8('0')
-	}
-}
-
-// ReadUint read uint
-func (iter *Iterator) ReadUint() uint {
-	return uint(iter.ReadUint64())
-}
-
-// ReadInt read int
-func (iter *Iterator) ReadInt() int {
-	return int(iter.ReadInt64())
-}
-
-// ReadInt8 read int8
-func (iter *Iterator) ReadInt8() (ret int8) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt8+1 {
-			iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int8(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt8 {
-		iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int8(val)
-}
-
-// ReadUint8 read uint8
-func (iter *Iterator) ReadUint8() (ret uint8) {
-	val := iter.readUint32(iter.nextToken())
-	if val > math.MaxUint8 {
-		iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return uint8(val)
-}
-
-// ReadInt16 read int16
-func (iter *Iterator) ReadInt16() (ret int16) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt16+1 {
-			iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int16(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt16 {
-		iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int16(val)
-}
-
-// ReadUint16 read uint16
-func (iter *Iterator) ReadUint16() (ret uint16) {
-	val := iter.readUint32(iter.nextToken())
-	if val > math.MaxUint16 {
-		iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return uint16(val)
-}
-
-// ReadInt32 read int32
-func (iter *Iterator) ReadInt32() (ret int32) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt32+1 {
-			iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int32(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt32 {
-		iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int32(val)
-}
-
-// ReadUint32 read uint32
-func (iter *Iterator) ReadUint32() (ret uint32) {
-	return iter.readUint32(iter.nextToken())
-}
-
-func (iter *Iterator) readUint32(c byte) (ret uint32) {
-	ind := intDigits[c]
-	if ind == 0 {
-		return 0 // single zero
-	}
-	if ind == invalidCharForNumber {
-		iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
-		return
-	}
-	value := uint32(ind)
-	if iter.tail-iter.head > 10 {
-		i := iter.head
-		ind2 := intDigits[iter.buf[i]]
-		if ind2 == invalidCharForNumber {
-			iter.head = i
-			return value
-		}
-		i++
-		ind3 := intDigits[iter.buf[i]]
-		if ind3 == invalidCharForNumber {
-			iter.head = i
-			return value*10 + uint32(ind2)
-		}
-		//iter.head = i + 1
-		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
-		i++
-		ind4 := intDigits[iter.buf[i]]
-		if ind4 == invalidCharForNumber {
-			iter.head = i
-			return value*100 + uint32(ind2)*10 + uint32(ind3)
-		}
-		i++
-		ind5 := intDigits[iter.buf[i]]
-		if ind5 == invalidCharForNumber {
-			iter.head = i
-			return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
-		}
-		i++
-		ind6 := intDigits[iter.buf[i]]
-		if ind6 == invalidCharForNumber {
-			iter.head = i
-			return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
-		}
-		i++
-		ind7 := intDigits[iter.buf[i]]
-		if ind7 == invalidCharForNumber {
-			iter.head = i
-			return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
-		}
-		i++
-		ind8 := intDigits[iter.buf[i]]
-		if ind8 == invalidCharForNumber {
-			iter.head = i
-			return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
-		}
-		i++
-		ind9 := intDigits[iter.buf[i]]
-		value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
-		iter.head = i
-		if ind9 == invalidCharForNumber {
-			return value
-		}
-	}
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			ind = intDigits[iter.buf[i]]
-			if ind == invalidCharForNumber {
-				iter.head = i
-				return value
-			}
-			if value > uint32SafeToMultiply10 {
-				value2 := (value << 3) + (value << 1) + uint32(ind)
-				if value2 < value {
-					iter.ReportError("readUint32", "overflow")
-					return
-				}
-				value = value2
-				continue
-			}
-			value = (value << 3) + (value << 1) + uint32(ind)
-		}
-		if !iter.loadMore() {
-			return value
-		}
-	}
-}
-
-// ReadInt64 read int64
-func (iter *Iterator) ReadInt64() (ret int64) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint64(iter.readByte())
-		if val > math.MaxInt64+1 {
-			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
-			return
-		}
-		return -int64(val)
-	}
-	val := iter.readUint64(c)
-	if val > math.MaxInt64 {
-		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
-		return
-	}
-	return int64(val)
-}
-
-// ReadUint64 read uint64
-func (iter *Iterator) ReadUint64() uint64 {
-	return iter.readUint64(iter.nextToken())
-}
-
-func (iter *Iterator) readUint64(c byte) (ret uint64) {
-	ind := intDigits[c]
-	if ind == 0 {
-		return 0 // single zero
-	}
-	if ind == invalidCharForNumber {
-		iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
-		return
-	}
-	value := uint64(ind)
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			ind = intDigits[iter.buf[i]]
-			if ind == invalidCharForNumber {
-				iter.head = i
-				return value
-			}
-			if value > uint64SafeToMultiple10 {
-				value2 := (value << 3) + (value << 1) + uint64(ind)
-				if value2 < value {
-					iter.ReportError("readUint64", "overflow")
-					return
-				}
-				value = value2
-				continue
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-		if !iter.loadMore() {
-			return value
-		}
-	}
-}
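// [Editor's illustrative sketch — not part of the deleted vendor file. The readers
// above accumulate digits inline with overflow checks and only fall back to strconv
// for awkward inputs such as exponents.]
func exampleNumbers() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `18446744073709551615`)
	u := iter.ReadUint64() // fast path: digit-by-digit accumulation
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `3.14e2`)
	f := iter.ReadFloat64() // exponents take the strconv.ParseFloat slow path
	_, _ = u, f
}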
diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go
deleted file mode 100644
index 6ec8fb7fd7..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter_object.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"unicode"
-	"unsafe"
-)
-
-// ReadObject reads one field from the object.
-// If the object has ended, it returns an empty string.
-// Otherwise, it returns the field name.
-func (iter *Iterator) ReadObject() (ret string) {
-	c := iter.nextToken()
-	switch c {
-	case 'n':
-		iter.skipThreeBytes('u', 'l', 'l')
-		return "" // null
-	case '{':
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			return string(iter.readObjectFieldAsBytes())
-		}
-		if c == '}' {
-			return "" // end of object
-		}
-		iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
-		return
-	case ',':
-		return string(iter.readObjectFieldAsBytes())
-	case '}':
-		return "" // end of object
-	default:
-		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
-		return
-	}
-}
-
-func (iter *Iterator) readFieldHash() int32 {
-	hash := int64(0x811c9dc5)
-	c := iter.nextToken()
-	if c == '"' {
-		for {
-			for i := iter.head; i < iter.tail; i++ {
-				// require ascii string and no escape
-				b := iter.buf[i]
-				if 'A' <= b && b <= 'Z' {
-					b += 'a' - 'A'
-				}
-				if b == '"' {
-					iter.head = i + 1
-					c = iter.nextToken()
-					if c != ':' {
-						iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
-					}
-					return int32(hash)
-				}
-				hash ^= int64(b)
-				hash *= 0x1000193
-			}
-			if !iter.loadMore() {
-				iter.ReportError("readFieldHash", `incomplete field name`)
-				return 0
-			}
-		}
-	}
-	iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
-	return 0
-}
-
-func calcHash(str string) int32 {
-	hash := int64(0x811c9dc5)
-	for _, b := range str {
-		hash ^= int64(unicode.ToLower(b))
-		hash *= 0x1000193
-	}
-	return int32(hash)
-}
-
-// ReadObjectCB read object with callback, the key is ascii only and field name not copied
-func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
-	c := iter.nextToken()
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			field := iter.readObjectFieldAsBytes()
-			if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
-				return false
-			}
-			c = iter.nextToken()
-			for c == ',' {
-				field = iter.readObjectFieldAsBytes()
-				if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
-					return false
-				}
-				c = iter.nextToken()
-			}
-			if c != '}' {
-				iter.ReportError("ReadObjectCB", `object not ended with }`)
-				return false
-			}
-			return true
-		}
-		if c == '}' {
-			return true
-		}
-		iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
-		return false
-	}
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return true // null
-	}
-	iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
-	return false
-}
-
-// ReadMapCB read map with callback, the key can be any string
-func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
-	c := iter.nextToken()
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			field := iter.ReadString()
-			if iter.nextToken() != ':' {
-				iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-				return false
-			}
-			if !callback(iter, field) {
-				return false
-			}
-			c = iter.nextToken()
-			for c == ',' {
-				field = iter.ReadString()
-				if iter.nextToken() != ':' {
-					iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-					return false
-				}
-				if !callback(iter, field) {
-					return false
-				}
-				c = iter.nextToken()
-			}
-			if c != '}' {
-				iter.ReportError("ReadMapCB", `object not ended with }`)
-				return false
-			}
-			return true
-		}
-		if c == '}' {
-			return true
-		}
-		iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
-		return false
-	}
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return true // null
-	}
-	iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
-	return false
-}
-
-func (iter *Iterator) readObjectStart() bool {
-	c := iter.nextToken()
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '}' {
-			return false
-		}
-		iter.unreadByte()
-		return true
-	} else if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return false
-	}
-	iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
-	return false
-}
-
-func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
-	str := iter.ReadStringAsSlice()
-	if iter.skipWhitespacesWithoutLoadMore() {
-		if ret == nil {
-			ret = make([]byte, len(str))
-			copy(ret, str)
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-	if iter.buf[iter.head] != ':' {
-		iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
-		return
-	}
-	iter.head++
-	if iter.skipWhitespacesWithoutLoadMore() {
-		if ret == nil {
-			ret = make([]byte, len(str))
-			copy(ret, str)
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-	if ret == nil {
-		return str
-	}
-	return ret
-}
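// [Editor's illustrative sketch — not part of the deleted vendor file, showing the two
// object-reading styles defined above. Assumes imports of fmt and the jsoniter package.]
func exampleObjects() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a":1,"b":2}`)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		fmt.Println(field, iter.ReadInt()) // pull style: "" signals end of object
	}
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `{"a":1,"b":2}`)
	iter.ReadObjectCB(func(it *jsoniter.Iterator, field string) bool {
		it.Skip()           // callback style: the value must be consumed
		return field != "a" // returning false stops the iteration early
	})
}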
diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go
deleted file mode 100644
index 047d58a4bc..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//+build jsoniter-sloppy
-
-package jsoniter
-
-// sloppy but faster implementation, do not validate the input json
-
-func (iter *Iterator) skipNumber() {
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case ' ', '\n', '\r', '\t', ',', '}', ']':
-				iter.head = i
-				return
-			}
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipArray() {
-	level := 1
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			switch iter.buf[i] {
-			case '"': // If inside string, skip it
-				iter.head = i + 1
-				iter.skipString()
-				i = iter.head - 1 // it will be i++ soon
-			case '[': // If open symbol, increase level
-				level++
-			case ']': // If close symbol, decrease level
-				level--
-
-				// If we have returned to the original level, we're done
-				if level == 0 {
-					iter.head = i + 1
-					return
-				}
-			}
-		}
-		if !iter.loadMore() {
-			iter.ReportError("skipArray", "incomplete array")
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipObject() {
-	level := 1
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			switch iter.buf[i] {
-			case '"': // If inside string, skip it
-				iter.head = i + 1
-				iter.skipString()
-				i = iter.head - 1 // it will be i++ soon
-			case '{': // If open symbol, increase level
-				level++
-			case '}': // If close symbol, decrease level
-				level--
-
-				// If we have returned to the original level, we're done
-				if level == 0 {
-					iter.head = i + 1
-					return
-				}
-			}
-		}
-		if !iter.loadMore() {
-			iter.ReportError("skipObject", "incomplete object")
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipString() {
-	for {
-		end, escaped := iter.findStringEnd()
-		if end == -1 {
-			if !iter.loadMore() {
-				iter.ReportError("skipString", "incomplete string")
-				return
-			}
-			if escaped {
-				iter.head = 1 // skip the first char as last char read is \
-			}
-		} else {
-			iter.head = end
-			return
-		}
-	}
-}
-
-// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
-// Tries to find the end of a string.
-// Supports strings that contain escaped quote symbols.
-func (iter *Iterator) findStringEnd() (int, bool) {
-	escaped := false
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		if c == '"' {
-			if !escaped {
-				return i + 1, false
-			}
-			j := i - 1
-			for {
-				if j < iter.head || iter.buf[j] != '\\' {
-					// even number of backslashes
-					// either end of buffer, or " found
-					return i + 1, true
-				}
-				j--
-				if j < iter.head || iter.buf[j] != '\\' {
-					// odd number of backslashes
-					// it is \" or \\\"
-					break
-				}
-				j--
-			}
-		} else if c == '\\' {
-			escaped = true
-		}
-	}
-	j := iter.tail - 1
-	for {
-		if j < iter.head || iter.buf[j] != '\\' {
-			// even number of backslashes
-			// either end of buffer, or " found
			return -1, false // do not end with \
-		}
-		j--
-		if j < iter.head || iter.buf[j] != '\\' {
-			// odd number of backslashes
-			// it is \" or \\\"
-			break
-		}
-		j--
-
-	}
-	return -1, true // end with \
-}
diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go
deleted file mode 100644
index cc1933de2e..0000000000
--- a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go
+++ /dev/null
@@ -1,89 +0,0 @@
-//+build !jsoniter-sloppy
-
-package jsoniter
-
-import "fmt"
-
-func (iter *Iterator) skipNumber() {
-	if !iter.trySkipNumber() {
-		iter.unreadByte()
-		iter.ReadFloat32()
-	}
-}
-
-func (iter *Iterator) trySkipNumber() bool {
-	dotFound := false
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		case '.':
-			if dotFound {
-				iter.ReportError("validateNumber", `more than one dot found in number`)
-				return true // already failed
-			}
-			if i+1 == iter.tail {
-				return false
-			}
-			c = iter.buf[i+1]
-			switch c {
-			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			default:
-				iter.ReportError("validateNumber", `missing digit after dot`)
-				return true // already failed
-			}
-			dotFound = true
-		default:
-			switch c {
-			case ',', ']', '}', ' ', '\t', '\n', '\r':
-				if iter.head == i {
-					return false // if - without following digits
-				}
-				iter.head = i
-				return true // must be valid
-			}
-			return false // may be invalid
-		}
-	}
-	return false
-}
-
-func (iter *Iterator) skipString() {
-	if !iter.trySkipString() {
-		iter.unreadByte()
-		iter.ReadString()
-	}
-}
-
-func (iter *Iterator) trySkipString() bool {
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		if c == '"' {
-			iter.head = i + 1
-			return true // valid
-		} else if c == '\\' {
-			return false
-		} else if c < ' ' {
-			iter.ReportError("trySkipString",
-				fmt.Sprintf(`invalid control character found: %d`, c))
-			return true // already failed
-		}
-	}
-	return false
-}
-
-func (iter *Iterator) skipObject() {
-	iter.unreadByte()
-	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
-		iter.Skip()
-		return true
-	})
-}
-
-func (iter *Iterator) skipArray() {
-	iter.unreadByte()
-	iter.ReadArrayCB(func(iter *Iterator) bool {
-		iter.Skip()
-		return true
-	})
-}
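// [Editor's illustrative sketch — not part of the deleted vendor files. Skip discards a
// whole element; building with -tags jsoniter-sloppy selects the faster, non-validating
// skipper, otherwise the strict variant above validates as it skips.]
func exampleSkip() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"big":[1,[2,3],{"k":"v"}],"keep":true}`)
	iter.ReadObject() // "big"
	iter.Skip()       // skips the nested array without decoding it
	iter.ReadObject() // "keep"
	fmt.Println(iter.ReadBool())
}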
diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go
deleted file mode 100644
index e187b200a9..0000000000
--- a/vendor/github.com/json-iterator/go/feature_json_number.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"strconv"
-)
-
-type Number string
-
-// String returns the literal text of the number.
-func (n Number) String() string { return string(n) }
-
-// Float64 returns the number as a float64.
-func (n Number) Float64() (float64, error) {
-	return strconv.ParseFloat(string(n), 64)
-}
-
-// Int64 returns the number as an int64.
-func (n Number) Int64() (int64, error) {
-	return strconv.ParseInt(string(n), 10, 64)
-}
-
-func CastJsonNumber(val interface{}) (string, bool) {
-	switch typedVal := val.(type) {
-	case json.Number:
-		return string(typedVal), true
-	case Number:
-		return string(typedVal), true
-	}
-	return "", false
-}
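// [Editor's illustrative sketch — not part of the deleted vendor file. With UseNumber
// set, numbers decoded into interface{} surface as json.Number (see useNumber in
// feature_config.go above); assumes an encoding/json import.]
func exampleUseNumber() {
	api := jsoniter.Config{UseNumber: true}.Froze()
	var v interface{}
	_ = api.Unmarshal([]byte(`123`), &v)
	n, _ := v.(json.Number) // instead of the default float64
	i, _ := n.Int64()       // lossless integer access
	_ = i
}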
diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go
deleted file mode 100644
index 73962bc6f6..0000000000
--- a/vendor/github.com/json-iterator/go/feature_pool.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-)
-
-// IteratorPool a thread safe pool of iterators with same configuration
-type IteratorPool interface {
-	BorrowIterator(data []byte) *Iterator
-	ReturnIterator(iter *Iterator)
-}
-
-// StreamPool a thread safe pool of streams with same configuration
-type StreamPool interface {
-	BorrowStream(writer io.Writer) *Stream
-	ReturnStream(stream *Stream)
-}
-
-func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
-	select {
-	case stream := <-cfg.streamPool:
-		stream.Reset(writer)
-		return stream
-	default:
-		return NewStream(cfg, writer, 512)
-	}
-}
-
-func (cfg *frozenConfig) ReturnStream(stream *Stream) {
-	stream.Error = nil
-	select {
-	case cfg.streamPool <- stream:
-		return
-	default:
-		return
-	}
-}
-
-func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
-	select {
-	case iter := <-cfg.iteratorPool:
-		iter.ResetBytes(data)
-		return iter
-	default:
-		return ParseBytes(cfg, data)
-	}
-}
-
-func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
-	iter.Error = nil
-	select {
-	case cfg.iteratorPool <- iter:
-		return
-	default:
-		return
-	}
-}
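// [Editor's illustrative sketch — not part of the deleted vendor file. The pools above
// are bounded channels (capacity 16); Borrow falls back to allocating when the pool is
// empty, and Return clears the error before recycling.]
func exampleBorrow(cfg jsoniter.API, data []byte) interface{} {
	iter := cfg.BorrowIterator(data)
	defer cfg.ReturnIterator(iter) // recycle the iterator for later reuse
	return iter.Read()
}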
diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go
deleted file mode 100644
index 4483e34b84..0000000000
--- a/vendor/github.com/json-iterator/go/feature_reflect.go
+++ /dev/null
@@ -1,703 +0,0 @@
-package jsoniter
-
-import (
-	"encoding"
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"time"
-	"unsafe"
-)
-
-// ValDecoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValDecoder with json.Decoder.
-// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
-//
-// Reflection on the type creates decoders, which are then cached.
-// Reflection on values is avoided as much as we can, as the reflect.Value itself will allocate, with the following exceptions:
-// 1. creating an instance of a new value, for example *int will need an int to be allocated
-// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New
-// 3. assignment to a map, where both key and value will be reflect.Value
-// For a simple struct binding, it will be reflect.Value free and allocation free
-type ValDecoder interface {
-	Decode(ptr unsafe.Pointer, iter *Iterator)
-}
-
-// ValEncoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValEncoder with json.Encoder.
-// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
-type ValEncoder interface {
-	IsEmpty(ptr unsafe.Pointer) bool
-	Encode(ptr unsafe.Pointer, stream *Stream)
-	EncodeInterface(val interface{}, stream *Stream)
-}
-
-type checkIsEmpty interface {
-	IsEmpty(ptr unsafe.Pointer) bool
-}
-
-// WriteToStream the default implementation for the ValEncoder method EncodeInterface
-func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) {
-	e := (*emptyInterface)(unsafe.Pointer(&val))
-	if e.word == nil {
-		stream.WriteNil()
-		return
-	}
-	if reflect.TypeOf(val).Kind() == reflect.Ptr {
-		encoder.Encode(unsafe.Pointer(&e.word), stream)
-	} else {
-		encoder.Encode(e.word, stream)
-	}
-}
-
-var jsonNumberType reflect.Type
-var jsoniterNumberType reflect.Type
-var jsonRawMessageType reflect.Type
-var jsoniterRawMessageType reflect.Type
-var anyType reflect.Type
-var marshalerType reflect.Type
-var unmarshalerType reflect.Type
-var textMarshalerType reflect.Type
-var textUnmarshalerType reflect.Type
-
-func init() {
-	jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem()
-	jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem()
-	jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
-	jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem()
-	anyType = reflect.TypeOf((*Any)(nil)).Elem()
-	marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
-	unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
-	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
-	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
-}
-
-type optionalDecoder struct {
-	valueType    reflect.Type
-	valueDecoder ValDecoder
-}
-
-func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if iter.ReadNil() {
-		*((*unsafe.Pointer)(ptr)) = nil
-	} else {
-		if *((*unsafe.Pointer)(ptr)) == nil {
-			//pointer to null, we have to allocate memory to hold the value
-			value := reflect.New(decoder.valueType)
-			newPtr := extractInterface(value.Interface()).word
-			decoder.valueDecoder.Decode(newPtr, iter)
-			*((*uintptr)(ptr)) = uintptr(newPtr)
-		} else {
-			//reuse existing instance
-			decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
-		}
-	}
-}
-
-type deferenceDecoder struct {
-	// only to dereference a pointer
-	valueType    reflect.Type
-	valueDecoder ValDecoder
-}
-
-func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		//pointer to null, we have to allocate memory to hold the value
-		value := reflect.New(decoder.valueType)
-		newPtr := extractInterface(value.Interface()).word
-		decoder.valueDecoder.Decode(newPtr, iter)
-		*((*uintptr)(ptr)) = uintptr(newPtr)
-	} else {
-		//reuse existing instance
-		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
-	}
-}
-
-type optionalEncoder struct {
-	valueEncoder ValEncoder
-}
-
-func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		stream.WriteNil()
-	} else {
-		encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
-	}
-}
-
-func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
-	WriteToStream(val, stream, encoder)
-}
-
-func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		return true
-	}
-	return false
-}
-
-type placeholderEncoder struct {
-	cfg      *frozenConfig
-	cacheKey reflect.Type
-}
-
-func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	encoder.getRealEncoder().Encode(ptr, stream)
-}
-
-func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) {
-	WriteToStream(val, stream, encoder)
-}
-
-func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.getRealEncoder().IsEmpty(ptr)
-}
-
-func (encoder *placeholderEncoder) getRealEncoder() ValEncoder {
-	for i := 0; i < 500; i++ {
-		realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey)
-		_, isPlaceholder := realDecoder.(*placeholderEncoder)
-		if isPlaceholder {
-			time.Sleep(10 * time.Millisecond)
-		} else {
-			return realDecoder
-		}
-	}
-	panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey))
-}
-
-type placeholderDecoder struct {
-	cfg      *frozenConfig
-	cacheKey reflect.Type
-}
-
-func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	for i := 0; i < 500; i++ {
-		realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey)
-		_, isPlaceholder := realDecoder.(*placeholderDecoder)
-		if isPlaceholder {
-			time.Sleep(10 * time.Millisecond)
-		} else {
-			realDecoder.Decode(ptr, iter)
-			return
-		}
-	}
-	panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey))
-}
-
-// emptyInterface is the header for an interface{} value.
-type emptyInterface struct {
-	typ  unsafe.Pointer
-	word unsafe.Pointer
-}
-
-// nonEmptyInterface is the header for an interface with methods (not interface{})
-type nonEmptyInterface struct {
-	// see ../runtime/iface.go:/Itab
-	itab *struct {
-		ityp   unsafe.Pointer // static interface type
-		typ    unsafe.Pointer // dynamic concrete type
-		link   unsafe.Pointer
-		bad    int32
-		unused int32
-		fun    [100000]unsafe.Pointer // method table
-	}
-	word unsafe.Pointer
-}
-
-// ReadVal copies the underlying JSON into a Go value, same as json.Unmarshal
-func (iter *Iterator) ReadVal(obj interface{}) {
-	typ := reflect.TypeOf(obj)
-	cacheKey := typ.Elem()
-	decoder, err := decoderOfType(iter.cfg, cacheKey)
-	if err != nil {
-		iter.Error = err
-		return
-	}
-	e := (*emptyInterface)(unsafe.Pointer(&obj))
-	decoder.Decode(e.word, iter)
-}
-
-// WriteVal copies the Go value into the underlying JSON, same as json.Marshal
-func (stream *Stream) WriteVal(val interface{}) {
-	if nil == val {
-		stream.WriteNil()
-		return
-	}
-	typ := reflect.TypeOf(val)
-	cacheKey := typ
-	encoder, err := encoderOfType(stream.cfg, cacheKey)
-	if err != nil {
-		stream.Error = err
-		return
-	}
-	encoder.EncodeInterface(val, stream)
-}
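// [Editor's illustrative sketch — not part of the deleted vendor file. ReadVal and
// WriteVal are the reflection entry points shown above; codecs are built once per type
// and cached. Assumes imports of fmt and the jsoniter package.]
func exampleReadWriteVal() {
	type point struct{ X, Y int }
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"X":1,"Y":2}`)
	var p point
	iter.ReadVal(&p) // decode via the cached struct decoder
	stream := jsoniter.ConfigDefault.BorrowStream(nil)
	defer jsoniter.ConfigDefault.ReturnStream(stream)
	stream.WriteVal(p) // encode via the cached struct encoder
	fmt.Println(string(stream.Buffer()))
}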
-type prefix string
-
-func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) {
-	if err != nil {
-		return nil, fmt.Errorf("%s: %s", p, err.Error())
-	}
-	return decoder, err
-}
-
-func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) {
-	if err != nil {
-		return nil, fmt.Errorf("%s: %s", p, err.Error())
-	}
-	return encoder, err
-}
-
-func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
-	cacheKey := typ
-	decoder := cfg.getDecoderFromCache(cacheKey)
-	if decoder != nil {
-		return decoder, nil
-	}
-	decoder = getTypeDecoderFromExtension(typ)
-	if decoder != nil {
-		cfg.addDecoderToCache(cacheKey, decoder)
-		return decoder, nil
-	}
-	decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey}
-	cfg.addDecoderToCache(cacheKey, decoder)
-	decoder, err := createDecoderOfType(cfg, typ)
-	for _, extension := range extensions {
-		decoder = extension.DecorateDecoder(typ, decoder)
-	}
-	cfg.addDecoderToCache(cacheKey, decoder)
-	return decoder, err
-}
-
-func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
-	typeName := typ.String()
-	if typ == jsonRawMessageType {
-		return &jsonRawMessageCodec{}, nil
-	}
-	if typ == jsoniterRawMessageType {
-		return &jsoniterRawMessageCodec{}, nil
-	}
-	if typ.AssignableTo(jsonNumberType) {
-		return &jsonNumberCodec{}, nil
-	}
-	if typ.AssignableTo(jsoniterNumberType) {
-		return &jsoniterNumberCodec{}, nil
-	}
-	if typ.Implements(unmarshalerType) {
-		templateInterface := reflect.New(typ).Elem().Interface()
-		var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
-		if typ.Kind() == reflect.Ptr {
-			decoder = &optionalDecoder{typ.Elem(), decoder}
-		}
-		return decoder, nil
-	}
-	if reflect.PtrTo(typ).Implements(unmarshalerType) {
-		templateInterface := reflect.New(typ).Interface()
-		var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
-		return decoder, nil
-	}
-	if typ.Implements(textUnmarshalerType) {
-		templateInterface := reflect.New(typ).Elem().Interface()
-		var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
-		if typ.Kind() == reflect.Ptr {
-			decoder = &optionalDecoder{typ.Elem(), decoder}
-		}
-		return decoder, nil
-	}
-	if reflect.PtrTo(typ).Implements(textUnmarshalerType) {
-		templateInterface := reflect.New(typ).Interface()
-		var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
-		return decoder, nil
-	}
-	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
-		sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
-		if err != nil {
-			return nil, err
-		}
-		return &base64Codec{sliceDecoder: sliceDecoder}, nil
-	}
-	if typ.Implements(anyType) {
-		return &anyCodec{}, nil
-	}
-	switch typ.Kind() {
-	case reflect.String:
-		if typeName != "string" {
-			return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
-		}
-		return &stringCodec{}, nil
-	case reflect.Int:
-		if typeName != "int" {
-			return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
-		}
-		return &intCodec{}, nil
-	case reflect.Int8:
-		if typeName != "int8" {
-			return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
-		}
-		return &int8Codec{}, nil
-	case reflect.Int16:
-		if typeName != "int16" {
-			return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
-		}
-		return &int16Codec{}, nil
-	case reflect.Int32:
-		if typeName != "int32" {
-			return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
-		}
-		return &int32Codec{}, nil
-	case reflect.Int64:
-		if typeName != "int64" {
-			return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
-		}
-		return &int64Codec{}, nil
-	case reflect.Uint:
-		if typeName != "uint" {
-			return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
-		}
-		return &uintCodec{}, nil
-	case reflect.Uint8:
-		if typeName != "uint8" {
-			return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
-		}
-		return &uint8Codec{}, nil
-	case reflect.Uint16:
-		if typeName != "uint16" {
-			return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
-		}
-		return &uint16Codec{}, nil
-	case reflect.Uint32:
-		if typeName != "uint32" {
-			return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
-		}
-		return &uint32Codec{}, nil
-	case reflect.Uintptr:
-		if typeName != "uintptr" {
-			return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
-		}
-		return &uintptrCodec{}, nil
-	case reflect.Uint64:
-		if typeName != "uint64" {
-			return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
-		}
-		return &uint64Codec{}, nil
-	case reflect.Float32:
-		if typeName != "float32" {
-			return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
-		}
-		return &float32Codec{}, nil
-	case reflect.Float64:
-		if typeName != "float64" {
-			return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
-		}
-		return &float64Codec{}, nil
-	case reflect.Bool:
-		if typeName != "bool" {
-			return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
-		}
-		return &boolCodec{}, nil
-	case reflect.Interface:
-		if typ.NumMethod() == 0 {
-			return &emptyInterfaceCodec{}, nil
-		}
-		return &nonEmptyInterfaceCodec{}, nil
-	case reflect.Struct:
-		return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ))
-	case reflect.Array:
-		return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ))
-	case reflect.Slice:
-		return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
-	case reflect.Map:
-		return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ))
-	case reflect.Ptr:
-		return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ))
-	default:
-		return nil, fmt.Errorf("unsupported type: %v", typ)
-	}
-}
-
-func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
-	cacheKey := typ
-	encoder := cfg.getEncoderFromCache(cacheKey)
-	if encoder != nil {
-		return encoder, nil
-	}
-	encoder = getTypeEncoderFromExtension(typ)
-	if encoder != nil {
-		cfg.addEncoderToCache(cacheKey, encoder)
-		return encoder, nil
-	}
-	encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey}
-	cfg.addEncoderToCache(cacheKey, encoder)
-	encoder, err := createEncoderOfType(cfg, typ)
-	for _, extension := range extensions {
-		encoder = extension.DecorateEncoder(typ, encoder)
-	}
-	cfg.addEncoderToCache(cacheKey, encoder)
-	return encoder, err
-}
-
-func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
-	if typ == jsonRawMessageType {
-		return &jsonRawMessageCodec{}, nil
-	}
-	if typ == jsoniterRawMessageType {
-		return &jsoniterRawMessageCodec{}, nil
-	}
-	if typ.AssignableTo(jsonNumberType) {
-		return &jsonNumberCodec{}, nil
-	}
-	if typ.AssignableTo(jsoniterNumberType) {
-		return &jsoniterNumberCodec{}, nil
-	}
-	if typ.Implements(marshalerType) {
-		checkIsEmpty, err := createCheckIsEmpty(typ)
-		if err != nil {
-			return nil, err
-		}
-		templateInterface := reflect.New(typ).Elem().Interface()
-		var encoder ValEncoder = &marshalerEncoder{
-			templateInterface: extractInterface(templateInterface),
-			checkIsEmpty:      checkIsEmpty,
-		}
-		if typ.Kind() == reflect.Ptr {
-			encoder = &optionalEncoder{encoder}
-		}
-		return encoder, nil
-	}
-	if reflect.PtrTo(typ).Implements(marshalerType) {
-		checkIsEmpty, err := createCheckIsEmpty(reflect.PtrTo(typ))
-		if err != nil {
-			return nil, err
-		}
-		templateInterface := reflect.New(typ).Interface()
-		var encoder ValEncoder = &marshalerEncoder{
-			templateInterface: extractInterface(templateInterface),
-			checkIsEmpty:      checkIsEmpty,
-		}
-		return encoder, nil
-	}
-	if typ.Implements(textMarshalerType) {
-		checkIsEmpty, err := createCheckIsEmpty(typ)
-		if err != nil {
-			return nil, err
-		}
-		templateInterface := reflect.New(typ).Elem().Interface()
-		var encoder ValEncoder = &textMarshalerEncoder{
-			templateInterface: extractInterface(templateInterface),
-			checkIsEmpty:      checkIsEmpty,
-		}
-		if typ.Kind() == reflect.Ptr {
-			encoder = &optionalEncoder{encoder}
-		}
-		return encoder, nil
-	}
-	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
-		return &base64Codec{}, nil
-	}
-	if typ.Implements(anyType) {
return &anyCodec{}, nil - } - return createEncoderOfSimpleType(cfg, typ) -} - -func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { - kind := typ.Kind() - switch kind { - case reflect.String: - return &stringCodec{}, nil - case reflect.Int: - return &intCodec{}, nil - case reflect.Int8: - return &int8Codec{}, nil - case reflect.Int16: - return &int16Codec{}, nil - case reflect.Int32: - return &int32Codec{}, nil - case reflect.Int64: - return &int64Codec{}, nil - case reflect.Uint: - return &uintCodec{}, nil - case reflect.Uint8: - return &uint8Codec{}, nil - case reflect.Uint16: - return &uint16Codec{}, nil - case reflect.Uint32: - return &uint32Codec{}, nil - case reflect.Uintptr: - return &uintptrCodec{}, nil - case reflect.Uint64: - return &uint64Codec{}, nil - case reflect.Float32: - return &float32Codec{}, nil - case reflect.Float64: - return &float64Codec{}, nil - case reflect.Bool: - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return &structEncoder{}, nil - case reflect.Array: - return &arrayEncoder{}, nil - case reflect.Slice: - return &sliceEncoder{}, nil - case reflect.Map: - return &mapEncoder{}, nil - case reflect.Ptr: - return &optionalEncoder{}, nil - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return 
encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - elemType := typ.Elem() - decoder, err := decoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - return &optionalDecoder{elemType, decoder}, nil -} - -func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - elemEncoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - encoder := &optionalEncoder{elemEncoder} - if elemType.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return encoder, nil -} - -func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Interface() - return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil -} - -func extractInterface(val interface{}) emptyInterface { - return *((*emptyInterface)(unsafe.Pointer(&val))) -} - -func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - encoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Elem().Interface() - if cfg.sortMapKeys { - return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil - } - return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go deleted file mode 100644 index e23f187b7c..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &arrayDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &arrayEncoder{typ, typ.Elem(), encoder}, nil -} - -type arrayEncoder struct { - arrayType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - 
elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - // special optimization for interface{} - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteArrayStart() - stream.WriteNil() - stream.WriteArrayEnd() - return - } - elemType := encoder.arrayType.Elem() - if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - if offset < decoder.arrayType.Size() { - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) - offset += decoder.elemType.Size() - } else { - iter.Skip() - } - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go deleted file mode 100644 index 74f4b8babb..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ /dev/null @@ -1,413 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - onePtrEmbedded bool - onePtrOptimization bool - Type reflect.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field *reflect.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateDecoder(typ reflect.Type) ValDecoder - CreateEncoder(typ reflect.Type) ValEncoder - DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder := typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - decoder := typeDecoders[typ.Elem().String()] - if decoder != nil { - return &optionalDecoder{typ.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder := typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - encoder := typeEncoders[typ.Elem().String()] - if encoder != nil { - return &optionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - tag := field.Tag.Get(cfg.getTagKey()) - tagParts := strings.Split(tag, ",") - if tag == "-" { - continue - } - if field.Anonymous && (tag == "" || tagParts[0] == "") { - if field.Type.Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type.Elem()) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &optionalEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - fieldNames := calcFieldNames(field.Name, tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - var err error - decoder, err = decoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - var err error - encoder, err = encoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } - // map is stored as pointer in the struct - if encoder != nil && field.Type.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - } - binding := &Binding{ - Field: &field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil -} -func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - onePtrEmbedded := false - onePtrOptimization := false - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { - onePtrEmbedded = true - } - fallthrough - case reflect.Map: - onePtrOptimization = true - case reflect.Struct: - onePtrOptimization = isStructOnePtr(firstField.Type) - } - } - structDescriptor := &StructDescriptor{ - onePtrEmbedded: onePtrEmbedded, - onePtrOptimization: onePtrOptimization, - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, cfg) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -func isStructOnePtr(typ reflect.Type) bool { - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return isStructOnePtr(firstField.Type) - } - } - return false -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == 
"string" { - if binding.Field.Type.Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? - var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go deleted file mode 100644 index 005671e01b..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_map.go +++ /dev/null @@ -1,244 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "reflect" - "sort" - "strconv" - "unsafe" -) - -type mapDecoder struct { - mapType reflect.Type - keyType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder - mapInterface emptyInterface -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type - mapInterface := decoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface).Elem() - if iter.ReadNil() { - realVal.Set(reflect.Zero(decoder.mapType)) - return - } - if realVal.IsNil() { - realVal.Set(reflect.MakeMap(realVal.Type())) - } - iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { - elem := reflect.New(decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) - // to put into map, we have to use reflection - keyType := decoder.keyType - // TODO: remove this from loop - switch { - case keyType.Kind() == reflect.String: - realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) - return true - case keyType.Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) - return true - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) - return true - default: - switch keyType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowInt(n) { - iter.ReportError("read map key as int64", "read int64 failed") - return false - 
} - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowUint(n) { - iter.ReportError("read map key as uint64", "read uint64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - } - } - iter.ReportError("read map key", "unexpected map key type "+keyType.String()) - return true - }) -} - -type mapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - stream.WriteObjectStart() - for i, key := range realVal.MapKeys() { - if i != 0 { - stream.WriteMore() - } - encodeMapKey(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -func encodeMapKey(key reflect.Value, stream *Stream) { - if key.Kind() == reflect.String { - stream.WriteString(key.String()) - return - } - if tm, ok := key.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - if err != nil { - stream.Error = err - return - } - stream.writeByte('"') - stream.Write(buf) - stream.writeByte('"') - return - } - switch key.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - stream.writeByte('"') - stream.WriteInt64(key.Int()) - stream.writeByte('"') - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - stream.writeByte('"') - stream.WriteUint64(key.Uint()) - stream.writeByte('"') - return - } - stream.Error = &json.UnsupportedTypeError{Type: key.Type()} -} - -func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} - -type sortKeysMapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - - // Extract and sort the keys. 
- keys := realVal.MapKeys() - sv := stringValues(make([]reflectWithString, len(keys))) - for i, v := range keys { - sv[i].v = v - if err := sv[i].resolve(); err != nil { - stream.Error = err - return - } - } - sort.Sort(sv) - - stream.WriteObjectStart() - for i, key := range sv { - if i != 0 { - stream.WriteMore() - } - stream.WriteVal(key.s) // might need html escape, so can not WriteString directly - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key.v).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflectWithString - -type reflectWithString struct { - v reflect.Value - s string -} - -func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() - return nil - } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - w.s = string(buf) - return err - } - switch w.v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) - return nil - } - return &json.UnsupportedTypeError{Type: w.v.Type()} -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } - -func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go deleted file mode 100644 index 95bd1e87cc..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ /dev/null @@ -1,764 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/base64" - "encoding/json" - "reflect" - "unsafe" -) - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type intCodec struct { -} - -func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int)(ptr)) = iter.ReadInt() - } -} - -func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt(*((*int)(ptr))) -} - -func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int)(ptr)) == 0 -} - -type uintptrCodec struct { -} - -func 
(codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) - } -} - -func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(uint64(*((*uintptr)(ptr)))) -} - -func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uintptr)(ptr)) == 0 -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uintCodec struct { -} - -func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint)(ptr)) = iter.ReadUint() - return - } -} - -func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint(*((*uint)(ptr))) -} - -func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) 
Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type emptyInterfaceCodec struct { -} - -func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - existing := *((*interface{})(ptr)) - - // Checking for both typed and untyped nil pointers. 
- if existing != nil && - reflect.TypeOf(existing).Kind() == reflect.Ptr && - !reflect.ValueOf(existing).IsNil() { - - var ptrToExisting interface{} - for { - elem := reflect.ValueOf(existing).Elem() - if elem.Kind() != reflect.Ptr || elem.IsNil() { - break - } - ptrToExisting = existing - existing = elem.Interface() - } - - if iter.ReadNil() { - if ptrToExisting != nil { - nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) - reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) - } else { - *((*interface{})(ptr)) = nil - } - } else { - iter.ReadVal(existing) - } - - return - } - - if iter.ReadNil() { - *((*interface{})(ptr)) = nil - } else { - *((*interface{})(ptr)) = iter.Read() - } -} - -func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteVal(*((*interface{})(ptr))) -} - -func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - emptyInterface := (*emptyInterface)(ptr) - return emptyInterface.typ == nil -} - -type nonEmptyInterfaceCodec struct { -} - -func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - if nonEmptyInterface.itab == nil { - iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") - return - } - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - iter.ReadVal(&i) - if e.word == nil { - nonEmptyInterface.itab = nil - } - nonEmptyInterface.word = e.word -} - -func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - var i interface{} - if nonEmptyInterface.itab != nil { - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - } - stream.WriteVal(i) -} - -func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - return nonEmptyInterface.word == nil -} - -type anyCodec struct { -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Any)(ptr)) = iter.ReadAny() -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - (*((*Any)(ptr))).WriteTo(stream) -} - -func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { - (val.(Any)).WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - return (*((*Any)(ptr))).Size() == 0 -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.Number)(ptr)))) -} - -func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.Number))) -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct 
{ -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*Number)(ptr)))) -} - -func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(Number))) -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.RawMessage))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(RawMessage))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} - -type base64Codec struct { - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Len = 0 - ptrSlice.Cap = 0 - ptrSlice.Data = nil - return - } - switch iter.WhatIsNext() { - case StringValue: - encoding := base64.StdEncoding - src := iter.SkipAndReturnBytes() - src = src[1 : len(src)-1] - decodedLen := encoding.DecodedLen(len(src)) - dst := make([]byte, decodedLen) - len, err := encoding.Decode(dst, src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - dst = dst[:len] - dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Data = dstSlice.Data - ptrSlice.Cap = dstSlice.Cap - ptrSlice.Len = dstSlice.Len - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { - ptr := extractInterface(val).word - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - 
encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type marshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler, ok := (*realInterface).(json.Marshaler) - if !ok { - stream.WriteVal(nil) - return - } - - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} -func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - stream.WriteString(string(bytes)) 
- } -} - -func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go deleted file mode 100644 index 59b1235c0d..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_object.go +++ /dev/null @@ -1,196 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{}, nil - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else 
{ - return true, true - } - } -} - -func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - bindings := map[string]*Binding{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - return createStructDecoder(typ, fields) -} - -type structFieldEncoder struct { - field *reflect.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -type structEncoder struct { - onePtrEmbedded bool - onePtrOptimization bool - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() -} - -func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if encoder.onePtrOptimization { - if e.word == nil && encoder.onePtrEmbedded { - stream.WriteObjectStart() - stream.WriteObjectEnd() - return - } - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go deleted file mode 100644 index 7377eec7b3..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ /dev/null @@ -1,149 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfSlice(cfg 
*frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &sliceDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &sliceEncoder{typ, typ.Elem(), encoder}, nil -} - -type sliceEncoder struct { - sliceType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - slice := (*sliceHeader)(ptr) - if slice.Data == nil { - stream.WriteNil() - return - } - if slice.Len == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(slice.Data) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - for i := 1; i < slice.Len; i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - slice := (*sliceHeader)(ptr) - return slice.Len == 0 -} - -type sliceDecoder struct { - sliceType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -// sliceHeader is a safe version of SliceHeader used within this package. -type sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - slice := (*sliceHeader)(ptr) - if iter.ReadNil() { - slice.Len = 0 - slice.Cap = 0 - slice.Data = nil - return - } - reuseSlice(slice, decoder.sliceType, 4) - slice.Len = 0 - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - growOne(slice, decoder.sliceType, decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) - offset += decoder.elemType.Size() - return true - }) -} - -// grow grows the slice s so that it can hold extra more values, allocating -// more capacity if needed. It also returns the old and new slice lengths. 
-func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { - newLen := slice.Len + 1 - if newLen <= slice.Cap { - slice.Len = newLen - return - } - newCap := slice.Cap - if newCap == 0 { - newCap = 1 - } else { - for newCap < newLen { - if slice.Len < 1024 { - newCap += newCap - } else { - newCap += newCap / 4 - } - } - } - newVal := reflect.MakeSlice(sliceType, newLen, newCap) - dst := unsafe.Pointer(newVal.Pointer()) - // copy old array into new array - originalBytesCount := uintptr(slice.Len) * elementType.Size() - srcPtr := (*[1 << 30]byte)(slice.Data) - dstPtr := (*[1 << 30]byte)(dst) - for i := uintptr(0); i < originalBytesCount; i++ { - dstPtr[i] = srcPtr[i] - } - slice.Data = dst - slice.Len = newLen - slice.Cap = newCap -} - -func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { - if expectedCap <= slice.Cap { - return - } - newVal := reflect.MakeSlice(sliceType, 0, expectedCap) - dst := unsafe.Pointer(newVal.Pointer()) - slice.Data = dst - slice.Cap = expectedCap -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go deleted file mode 100644 index b3417fd73a..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ /dev/null @@ -1,916 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { - knownHash := map[int32]struct{}{ - 0: {}, - } - switch len(fields) { - case 0: - return &skipObjectDecoder{typ}, nil - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil - } - case 2: - var fieldHash1 int32 - var fieldHash2 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil - case 3: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil - case 4: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 
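growOne above implements a classic growth policy: double the capacity while the slice is small, switch to growing by a quarter once it holds 1024 elements, then copy the old contents into the new backing array. The capacity calculation, isolated as a sketch:

package main

import "fmt"

// nextCap mirrors growOne's sizing: double below 1024 elements,
// then grow by 25% at a time.
func nextCap(length, capacity int) int {
	newLen := length + 1
	if newLen <= capacity {
		return capacity
	}
	newCap := capacity
	if newCap == 0 {
		newCap = 1
	}
	for newCap < newLen {
		if length < 1024 {
			newCap += newCap
		} else {
			newCap += newCap / 4
		}
	}
	return newCap
}

func main() {
	fmt.Println(nextCap(4, 4), nextCap(1024, 1024), nextCap(2048, 2048)) // 8 1280 2560
}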
*structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4}, nil - case 5: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil - case 6: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil - case 7: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var 
fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7}, nil - case 8: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil - case 9: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := 
knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil - case 10: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldName10 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10}, nil - } - return &generalStructDecoder{typ, fields}, nil -} - -type generalStructDecoder struct { - typ reflect.Type - fields map[string]*structFieldDecoder -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - fieldBytes := iter.readObjectFieldAsBytes() - field := 
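The ladder of one- through ten-field decoders being built above exists to turn field lookup into a switch over precomputed int32 hashes; any collision (or a hash of 0, pre-seeded in knownHash) falls back to generalStructDecoder's map lookup instead. A toy version of that dispatch, with hash/fnv standing in for the package's calcHash:

package main

import (
	"fmt"
	"hash/fnv"
)

// fieldHash stands in for the package's calcHash; any 32-bit string hash
// works as long as the same function is used at build time and decode time.
func fieldHash(name string) int32 {
	h := fnv.New32a()
	h.Write([]byte(name))
	return int32(h.Sum32())
}

func main() {
	nameHash, ageHash := fieldHash("name"), fieldHash("age")
	for _, field := range []string{"name", "age", "unknown"} {
		switch fieldHash(field) {
		case nameHash:
			fmt.Println(field, "-> decode into Name")
		case ageHash:
			fmt.Println(field, "-> decode into Age")
		default:
			fmt.Println(field, "-> skip value") // mirrors iter.Skip()
		}
	}
}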
*(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder := decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - for iter.nextToken() == ',' { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type skipObjectDecoder struct { - typ reflect.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect.Type - fieldHash int32 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type twoFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type threeFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fourFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - 
decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fiveFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sixFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sevenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - 
decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type eightFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type nineFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type tenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 
*structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder - fieldHash10 int32 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type structFieldDecoder struct { - field *reflect.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go deleted file mode 100644 index 9323848398..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ /dev/null @@ -1,307 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// Stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. -type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - n int - Error error - indention int -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. -func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, bufSize), - n: 0, - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.n = 0 -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return len(stream.buf) - stream.n -} - -// Buffered returns the number of bytes that have been written into the current buffer. 
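As the doc comments above note, NewStream with a nil writer turns the Stream into a pure in-memory builder whose result is read back with Buffer(). A usage sketch against the API as vendored here (current jsoniter releases keep the same entry points, but treat this as illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// nil writer: bytes accumulate in the internal buffer, no Flush needed.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("ok")
	stream.WriteBool(true)
	stream.WriteObjectEnd()
	fmt.Println(string(stream.Buffer())) // {"ok":true}
}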
-func (stream *Stream) Buffered() int { - return stream.n -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf[:stream.n] -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - for len(p) > stream.Available() && stream.Error == nil { - if stream.out == nil { - stream.growAtLeast(len(p)) - } else { - var n int - if stream.Buffered() == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, stream.Error = stream.out.Write(p) - } else { - n = copy(stream.buf[stream.n:], p) - stream.n += n - stream.Flush() - } - nn += n - p = p[n:] - } - } - if stream.Error != nil { - return nn, stream.Error - } - n := copy(stream.buf[stream.n:], p) - stream.n += n - nn += n - return nn, nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - if stream.Error != nil { - return - } - if stream.Available() < 1 { - stream.growAtLeast(1) - } - stream.buf[stream.n] = c - stream.n++ -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 2 { - stream.growAtLeast(2) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.n += 2 -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 3 { - stream.growAtLeast(3) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.n += 3 -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 4 { - stream.growAtLeast(4) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.n += 4 -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 5 { - stream.growAtLeast(5) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.buf[stream.n+4] = c5 - stream.n += 5 -} - -// Flush writes any buffered data to the underlying io.Writer. 
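Stream.Write above follows the same policy as bufio.Writer: a payload that fits is copied into the buffer, while an oversized payload goes straight to the underlying writer once the buffer is empty, skipping one copy (and when out is nil, the buffer simply grows instead). The branching, condensed to a pure function:

package main

import "fmt"

// writeDecision condenses the branching in Stream.Write: it reports how a
// payload of the given size is handled for a non-nil writer.
func writeDecision(available, buffered, payload int) string {
	switch {
	case payload <= available:
		return "copy into buffer"
	case buffered == 0:
		return "write directly to out, skipping the copy"
	default:
		return "fill buffer, flush, retry the remainder"
	}
}

func main() {
	fmt.Println(writeDecision(64, 0, 16))   // copy into buffer
	fmt.Println(writeDecision(64, 0, 4096)) // write directly to out, skipping the copy
	fmt.Println(writeDecision(64, 32, 128)) // fill buffer, flush, retry the remainder
}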
-func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - if stream.n == 0 { - return nil - } - n, err := stream.out.Write(stream.buf[0:stream.n]) - if n < stream.n && err == nil { - err = io.ErrShortWrite - } - if err != nil { - if n > 0 && n < stream.n { - copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) - } - stream.n -= n - stream.Error = err - return err - } - stream.n = 0 - return nil -} - -func (stream *Stream) ensure(minimal int) { - available := stream.Available() - if available < minimal { - stream.growAtLeast(minimal) - } -} - -func (stream *Stream) growAtLeast(minimal int) { - if stream.out != nil { - stream.Flush() - if stream.Available() >= minimal { - return - } - } - toGrow := len(stream.buf) - if toGrow < minimal { - toGrow = minimal - } - newBuf := make([]byte, len(stream.buf)+toGrow) - copy(newBuf, stream.Buffer()) - stream.buf = newBuf -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.ensure(len(s)) - if stream.Error != nil { - return - } - n := copy(stream.buf[stream.n:], s) - stream.n += n -} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeTwoBytes('[', ']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - stream.ensure(toWrite) - for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { - stream.buf[stream.n] = ' ' - stream.n++ - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go 
b/vendor/github.com/json-iterator/go/feature_stream_float.go deleted file mode 100644 index 9a404e11d4..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_float.go +++ /dev/null @@ -1,96 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go deleted file mode 100644 index 7cfd522c10..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_int.go +++ /dev/null @@ -1,320 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(buf []byte, v uint32, n int) int { - start := v >> 24 - if start == 0 { - buf[n] = byte(v >> 16) - n++ - buf[n] = byte(v >> 8) - n++ - } else if start == 1 { - buf[n] = byte(v >> 8) - n++ - } - buf[n] = byte(v) - n++ - return n -} - -func writeBuf(buf []byte, v uint32, n int) { - buf[n] = byte(v >> 16) - buf[n+1] = byte(v >> 8) - buf[n+2] = byte(v) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) 
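WriteFloat64Lossy above avoids strconv entirely for small magnitudes: it scales by 10^6, rounds half-up in integer space, prints the integer and fractional halves, and trims trailing zeros. The same arithmetic as a standalone sketch (sign handling and the large-value fallback to WriteFloat64 are omitted):

package main

import "fmt"

// lossy6 mirrors WriteFloat64Lossy for non-negative values: six decimal
// places of precision, rounded half up, trailing zeros stripped.
func lossy6(val float64) string {
	const exp = 1000000
	lval := uint64(val*exp + 0.5)
	whole, frac := lval/exp, lval%exp
	if frac == 0 {
		return fmt.Sprintf("%d", whole)
	}
	s := fmt.Sprintf("%d.%06d", whole, frac)
	for s[len(s)-1] == '0' { // strip trailing zeros, as the stream writer does
		s = s[:len(s)-1]
	}
	return s
}

func main() {
	fmt.Println(lossy6(3.14159265), lossy6(1.5)) // 3.141593 1.5
}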
WriteUint8(val uint8) { - stream.ensure(3) - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - stream.ensure(4) - n := stream.n - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint8(nval) - } - stream.n = writeFirstBuf(stream.buf, digits[val], n) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - stream.ensure(5) - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) - return - } - r1 := val - q1*1000 - n := writeFirstBuf(stream.buf, digits[q1], stream.n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - stream.ensure(6) - n := stream.n - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint16(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - n = writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - stream.ensure(10) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - stream.ensure(11) - n := stream.n - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint32(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - stream.ensure(20) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, 
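The integer writers above lean on the precomputed digits table: each entry packs three ASCII digits into its low three bytes plus, in byte 3, a count of leading digits to skip for values under 100 or 10, which is what writeFirstBuf consumes. A round-trip of the packing scheme:

package main

import "fmt"

// pack rebuilds one digits-table entry: three ASCII digits in the low
// bytes and a "digits to skip" count in byte 3.
func pack(i uint32) uint32 {
	v := (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
	if i < 10 {
		v += 2 << 24
	} else if i < 100 {
		v += 1 << 24
	}
	return v
}

func main() {
	for _, i := range []uint32{7, 42, 345} {
		v := pack(i)
		fmt.Printf("%3d -> skip=%d digits=%c%c%c\n", i, v>>24, byte(v>>16), byte(v>>8), byte(v))
	}
	// 7   -> skip=2 digits=007
	// 42  -> skip=1 digits=042
	// 345 -> skip=0 digits=345
}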
digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - n = writeFirstBuf(stream.buf, digits[q6], n) - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - stream.ensure(20) - n := stream.n - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint64(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - stream.buf[n] = byte(q6 + '0') - n++ - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go deleted file mode 100644 index 334282f05f..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_string.go +++ /dev/null @@ -1,396 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML ", - "", - }, - { - "unfinished script end tag", - "$a", - }, - { - "'>' completes script end tag", - "", - "", - }, - { - "self-closing script end tag", - "", - "", - }, - { - "nested script tag", - 
"", - "", - }, - { - "script/style mismatched tags", - "